diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 000000000..124847630 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,28 @@ +[run] + +# tasks that aren't wired up. +omit = + lm_eval/tasks/quac.py + lm_eval/tasks/storycloze.py + lm_eval/tasks/cbt.py + lm_eval/tasks/sat.py + lm_eval/tasks/triviaqa.py + lm_eval/tasks/naturalqs.py + lm_eval/models/dummy.py + +[report] +exclude_lines = + # Skip any pass lines such as may be used for @abstractmethod + pass + + # Have to re-enable the standard pragma + pragma: no cover + + # Don't complain about missing debug-only code: + def __repr__ + if self\.debug + + # Don't complain if tests don't hit defensive assertion code: + raise AssertionError + raise NotImplementedError + return NotImplemented diff --git a/.flake8 b/.flake8 new file mode 100644 index 000000000..73f6455d1 --- /dev/null +++ b/.flake8 @@ -0,0 +1,5 @@ +[flake8] +ignore = E203, E266, E501, W503, F403, F401, C901 +max-line-length = 127 +max-complexity = 10 +select = B,C,E,F,W,T4,B9 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml deleted file mode 100644 index 0a9cd74cd..000000000 --- a/.github/workflows/lint.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: lint - -on: - pull_request: - branches: - - main - push: - branches: - - main - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - lint: - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v4 - # with: - # ref: ${{ github.head_ref }} - - - name: Set up Python 3.11 - uses: actions/setup-python@v4 - with: - python-version: '3.11' - - - name: Install pre-commit hook - run: | - pip install pre-commit==3.8.0 mmengine - pre-commit install - - - name: Linting - run: pre-commit run --all-files diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml deleted file mode 100644 index 3fa87dc1b..000000000 --- a/.github/workflows/main.yml +++ /dev/null @@ -1,71 +0,0 @@ -name: Check - -on: - pull_request: - branches: - - main - push: - branches: - - main - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - test: - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: '3.11' - - - name: Install dependencies - run: | - pip install -r requirements.txt - - - name: Download dataset - run: | - # pwd # /home/runner/work/LightCompress/LightCompress - cd tools - python download_calib_dataset.py --save_path ../check/datasets/calib --dataset_name pileval - python download_eval_dataset.py --save_path ../check/datasets/eval --dataset_name wikitext2 - - - name: Download model - run: | - cd ci_check - mkdir -p opt-125m - cp model_urls.txt opt-125m/model_urls.txt - cd opt-125m - wget -i model_urls.txt - wget --no-check-certificate https://hf-mirror.com/facebook/opt-125m/resolve/main/pytorch_model.bin - - - name: Preparation for check. - run: | - cd ci_check # /home/runner/work/LightCompress/LightCompress/ci_check - python change_files.py - - - name: Run awq check - run: | - cd ci_check # /home/runner/work/LightCompress/LightCompress/ci_check - bash run_awq.sh - - - name: Run gptq check - run: | - cd ci_check # /home/runner/work/LightCompress/LightCompress/ci_check - bash run_gptq.sh - - - name: Check success - if: ${{ success() }} - run: echo "All steps completed successfully. Success!" - - - name: Clean up - if: ${{ always() }} - run: | - cd .. 
- rm -rf opt-125m - rm -rf check diff --git a/.github/workflows/new_tasks.yml b/.github/workflows/new_tasks.yml new file mode 100644 index 000000000..b748aab5c --- /dev/null +++ b/.github/workflows/new_tasks.yml @@ -0,0 +1,72 @@ +name: Tasks Modified + +on: + push: + branches: + - 'main' + pull_request: + branches: + - 'main' + workflow_dispatch: +# comment/edit out the above to stop/change the triggers +jobs: + changed_files: + runs-on: ubuntu-latest # windows-latest || macos-latest + timeout-minutes: 120 + name: Scan for changed tasks + steps: + - name: checkout + uses: actions/checkout@v3 + with: + fetch-depth: 2 # OR "2" -> To retrieve the preceding commit. + + # Uses the tj-actions/changed-files action to check for changes. + # Outputs provided here: https://github.com/tj-actions/changed-files#outputs + # The `files_yaml` input optionally takes a yaml string to specify filters, + # and prepends the filter name to the standard output names. + - name: Check task folders + id: changed-tasks + uses: tj-actions/changed-files@v44.5.2 + with: + # tasks checks the tasks folder and api checks the api folder for changes + files_yaml: | + tasks: + - lm_eval/tasks/** + api: + - lm_eval/api/** + write_output_files: true + + # The next step is optional; the files are written to the workspace by default (above). + # so it's just for debugging + - name: Run Tests + if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true' + run: | + echo .github/outputs/tasks_all_changed_and_modified_files.txt >> 'GITHUB_ENV' + echo "One or more test file(s) has changed." + echo "List of all the files that have changed: ${{ steps.changed-tasks.outputs.tasks_all_modified_files }}" + + - name: Set up Python 3.9 + if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true' + uses: actions/setup-python@v4 + with: + python-version: 3.9 + cache: 'pip' + cache-dependency-path: setup.py + - name: Install dependencies + if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true' + run: | + python -m pip install --upgrade pip + pip install -e '.[dev,ifeval]' --extra-index-url https://download.pytorch.org/whl/cpu + # Install optional git dependencies + # pip install bleurt@https://github.com/google-research/bleurt/archive/b610120347ef22b494b6d69b4316e303f5932516.zip#egg=bleurt + # if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + - name: Test with pytest + # if new tasks are added, run tests on them + if: steps.changed-tasks.outputs.tasks_any_modified == 'true' + run: python -m pytest tests/test_tasks.py -s -vv + # if api is modified, run tests on it + - name: Test more tasks with pytest + env: + API: true + if: steps.changed-tasks.outputs.api_any_modified == 'true' + run: python -m pytest tests/test_tasks.py -s -vv diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 000000000..be3481754 --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,78 @@ +name: Publish Python distribution to PyPI + +on: + push: + tags: + - '*' + +jobs: + build: + name: Build distribution + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.x" + + - name: Install pypa/build + run: >- + python3 -m + pip install + build + --user + - name: Build a binary wheel and a source tarball + run: python3 -m build + - 
name: Store the distribution packages + uses: actions/upload-artifact@v3 + with: + name: python-package-distributions + path: dist/ + + publish-to-pypi: + name: >- + Publish Python distribution to PyPI + if: startsWith(github.ref, 'refs/tags/') # only publish to PyPI on tag pushes + needs: + - build + runs-on: ubuntu-latest + environment: + name: pypi + url: https://pypi.org/p/lm_eval + permissions: + id-token: write # IMPORTANT: mandatory for trusted publishing + + steps: + - name: Download all the dists + uses: actions/download-artifact@v3 + with: + name: python-package-distributions + path: dist/ + - name: Publish distribution to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 + + publish-to-testpypi: + name: Publish Python distribution to TestPyPI + needs: + - build + runs-on: ubuntu-latest + + environment: + name: testpypi + url: https://test.pypi.org/p/lm_eval + + permissions: + id-token: write # IMPORTANT: mandatory for trusted publishing + + steps: + - name: Download all the dists + uses: actions/download-artifact@v3 + with: + name: python-package-distributions + path: dist/ + - name: Publish distribution to TestPyPI + uses: pypa/gh-action-pypi-publish@release/v1 + with: + repository-url: https://test.pypi.org/legacy/ diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml new file mode 100644 index 000000000..49b85fb9a --- /dev/null +++ b/.github/workflows/unit_tests.yml @@ -0,0 +1,95 @@ +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python +# just comment out unwanted steps to turn off the test. +name: Unit Tests + +on: + push: + branches: + - 'main' + pull_request: + branches: + - 'main' + workflow_dispatch: +# Jobs run concurrently and steps run sequentially within a job. +# jobs: linter and cpu_tests. Add more jobs/steps as required. +jobs: + linter: + name: Linters + runs-on: ubuntu-latest + timeout-minutes: 5 + + steps: + - name: Checkout Code + uses: actions/checkout@v4 + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: 3.8 + cache: pip + cache-dependency-path: pyproject.toml + - name: Pre-Commit + env: + SKIP: "no-commit-to-branch,mypy" + + uses: pre-commit/action@v3.0.1 +# # mypy turned off for now +# - name: Lint with mypy +# run: mypy . 
--ignore-missing-imports --check-untyped-defs --explicit-package-bases --warn-unreachable +# Job 2 + testcpu: + name: CPU Tests + runs-on: ubuntu-latest + strategy: + matrix: + python-version: [ "3.8", "3.9", "3.10", "3.11" ] + timeout-minutes: 30 + steps: + - name: Checkout Code + uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: pip + cache-dependency-path: pyproject.toml + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -e '.[dev,sentencepiece,api]' --extra-index-url https://download.pytorch.org/whl/cpu +# Install optional git dependencies +# pip install bleurt@https://github.com/google-research/bleurt/archive/b610120347ef22b494b6d69b4316e303f5932516.zip#egg=bleurt +# if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + - name: Test with pytest + run: python -m pytest --showlocals -s -vv -n=auto --ignore=tests/models/test_neuralmagic.py --ignore=tests/models/test_openvino.py + - name: Archive artifacts + uses: actions/upload-artifact@v3 + with: + name: output_results + path: | + test_logs/* + testmodels: + name: External LM Tests + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - name: Checkout Code + uses: actions/checkout@v4 + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: 3.8 + cache: pip + cache-dependency-path: pyproject.toml + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -e '.[dev,optimum,deepsparse,sparseml,api]' --extra-index-url https://download.pytorch.org/whl/cpu + - name: Test with pytest + run: python -m pytest tests/models --showlocals -s -vv + - name: Archive artifacts + uses: actions/upload-artifact@v3 + with: + name: output_results + path: | + test_logs/* diff --git a/.gitignore b/.gitignore index 896b38a12..c9278761b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,24 +1,24 @@ -*.pth -*.pt -*.onnx -*.pk -*.model -*.zip -*.tar +env *.pyc -*.log -*.o -*.so -*.a -*.exe -*.out +output/ +data/ +lm_cache .idea -**.DS_Store** -**/__pycache__/** -**.swp +build +dist +*.egg-info +venv .vscode/ -.env -save* -.log -*.pid -*.ipynb* +temp +__pycache__ +.ipynb_checkpoints +temp +test_logs/ +# IPython +profile_default/ +ipython_config.py +# don't track (the default location of) the cached requests +lm_eval/caching/.cache +# don't track files created by wandb +wandb +examples/wandb diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index 977234d5c..000000000 --- a/.gitmodules +++ /dev/null @@ -1,4 +0,0 @@ -[submodule "lm-evaluation-harness"] - path = lm-evaluation-harness - url = https://github.com/ModelTC/llmc.git - branch = lm-eval diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml old mode 100755 new mode 100644 index 4b345f0c3..d066cd9e6 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,72 +1,54 @@ -exclude: | - (?x)^( - imgs/| - docs/| - ci_check/| - requirements/| - scripts/ - ) +# Ignore test linting to avoid conflicting changes to version stability. 
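The new `.pre-commit-config.yaml` introduced here swaps the old flake8/isort/yapf/mdformat stack for ruff, codespell, and a set of standard pre-commit hooks. As a usage note rather than part of the diff itself, the hooks can be exercised locally before pushing; a minimal sketch, assuming `pre-commit` is installed from PyPI (the same commands the removed lint workflow ran in CI):

```bash
# Install the hook manager and register the hooks defined in .pre-commit-config.yaml
pip install pre-commit
pre-commit install

# Run every configured hook against the whole tree, mirroring the CI job
pre-commit run --all-files
```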
+exclude: ^tests/testdata/ repos: - - repo: https://github.com/PyCQA/flake8 - rev: 7.2.0 - hooks: - - id: flake8 - args: ["--max-line-length=100", "--ignore=F403,F401,W503,W504,E402"] - exclude: configs/ - - repo: https://github.com/PyCQA/isort - rev: 6.0.1 - hooks: - - id: isort - exclude: configs/ - - repo: https://github.com/pre-commit/mirrors-yapf - rev: v0.32.0 - hooks: - - id: yapf - args: ["--style={column_limit: 100}"] - exclude: configs/ - - repo: https://github.com/codespell-project/codespell - rev: v2.4.1 - hooks: - - id: codespell - exclude: | - (?x)^( - .*\.jsonl| - .*\.md.template| - configs/ - ) - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v5.0.0 + rev: v4.5.0 hooks: - - id: trailing-whitespace - exclude: | - (?x)^( - configs/.*?/.*\.txt - ) + - id: check-added-large-files + - id: check-ast + - id: check-byte-order-marker + - id: check-case-conflict + - id: check-json + - id: check-merge-conflict + args: [--assume-in-merge] + - id: check-symlinks - id: check-yaml + args: ["--unsafe"] + - id: destroyed-symlinks + - id: detect-private-key - id: end-of-file-fixer - exclude: | - (?x)^( - configs/.*?/.*\.txt - ) + - id: no-commit-to-branch + always_run: false - id: requirements-txt-fixer - - id: double-quote-string-fixer - - id: check-merge-conflict + - id: trailing-whitespace + args: [--markdown-linebreak-ext=md] + - id: fix-byte-order-marker + exclude: docs/CNAME - id: fix-encoding-pragma - args: ["--remove"] + args: [--remove] - id: mixed-line-ending - args: ["--fix=lf"] - - repo: https://github.com/executablebooks/mdformat - rev: 0.7.16 + args: [--fix=lf] + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.4.8 hooks: - - id: mdformat - args: ["--number", "--table-width", "200"] - additional_dependencies: - - mdformat-openmmlab - - mdformat_frontmatter - - linkify-it-py - exclude: configs/ - - repo: https://github.com/myint/docformatter - rev: v1.7.6 + # Run the linter. + - id: ruff + args: + - --fix + # Run the formatter. 
+ - id: ruff-format + - repo: https://github.com/codespell-project/codespell + rev: v2.3.0 hooks: - - id: docformatter - args: ["--in-place", "--wrap-descriptions", "100"] + - id: codespell + exclude: > + (?x)^( + .*\.json|ignore.txt|lm_eval/tasks/.*|.*yaml|.*\.ipynb + )$ + args: [--check-filenames, --check-hidden, --ignore-words=ignore.txt] +# - repo: https://github.com/pre-commit/mirrors-mypy +# rev: v1.5.1 +# hooks: +# - id: mypy +# additional_dependencies: [".[sentencepiece,multilingual,promptsource,gptq]", "types-PyYAML", "types-requests"] +# exclude: ^tests/.*$ diff --git a/CITATION.bib b/CITATION.bib new file mode 100644 index 000000000..4ec33f139 --- /dev/null +++ b/CITATION.bib @@ -0,0 +1,10 @@ +@misc{eval-harness, + author = {Gao, Leo and Tow, Jonathan and Abbasi, Baber and Biderman, Stella and Black, Sid and DiPofi, Anthony and Foster, Charles and Golding, Laurence and Hsu, Jeffrey and Le Noac'h, Alain and Li, Haonan and McDonell, Kyle and Muennighoff, Niklas and Ociepa, Chris and Phang, Jason and Reynolds, Laria and Schoelkopf, Hailey and Skowron, Aviya and Sutawika, Lintang and Tang, Eric and Thite, Anish and Wang, Ben and Wang, Kevin and Zou, Andy}, + title = {A framework for few-shot language model evaluation}, + month = 12, + year = 2023, + publisher = {Zenodo}, + version = {v0.4.0}, + doi = {10.5281/zenodo.10256836}, + url = {https://zenodo.org/records/10256836} +} diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 000000000..5a08ab244 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1 @@ +* @haileyschoelkopf @lintangsutawika @baberabb diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index ecbe5f5d3..000000000 --- a/Dockerfile +++ /dev/null @@ -1,47 +0,0 @@ -FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu22.04 AS base - -WORKDIR /app - -ENV DEBIAN_FRONTEND=noninteractive - -RUN sed -i 's|http://archive.ubuntu.com/ubuntu/|https://mirrors.tuna.tsinghua.edu.cn/ubuntu/|g' /etc/apt/sources.list && \ - sed -i 's|http://security.ubuntu.com/ubuntu/|https://mirrors.tuna.tsinghua.edu.cn/ubuntu/|g' /etc/apt/sources.list - -RUN apt-get update && \ - apt-get install -y vim tmux zip unzip wget git cmake build-essential software-properties-common curl libibverbs-dev ca-certificates iproute2 ffmpeg libsm6 libxext6 && \ - add-apt-repository ppa:deadsnakes/ppa && \ - apt-get update && \ - apt-get install -y python3.11 python3.11-venv python3.11-dev python3-pip && \ - apt-get clean && rm -rf /var/lib/apt/lists/* - -RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1 && \ - update-alternatives --install /usr/bin/python python /usr/bin/python3.11 1 - -RUN python -m pip install --upgrade pip - -RUN pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple - -RUN pip install packaging ninja opencv-python - -# download torch and torchvision wheels into whls filefold -COPY whls/*.whl /app/ - -RUN pip install --no-cache-dir torch-*.whl torchvision*.whl - -WORKDIR /workspace - -# download flash-attention source code -COPY flash-attention /workspace/flash-attention - -RUN cd flash-attention && pip install --no-cache-dir -v -e . - -# download fast-hadamard-transform source code -COPY fast-hadamard-transform /workspace/fast-hadamard-transform - -RUN cd fast-hadamard-transform && pip install --no-cache-dir -v -e . 
- -COPY requirements/runtime.txt /app/ - -RUN pip install --no-cache-dir -r /app/runtime.txt - -RUN rm -rf /app diff --git a/Dockerfile_cu124 b/Dockerfile_cu124 deleted file mode 100644 index 7fa77a700..000000000 --- a/Dockerfile_cu124 +++ /dev/null @@ -1,47 +0,0 @@ -FROM nvidia/cuda:12.4.1-cudnn-devel-ubuntu22.04 AS base - -WORKDIR /app - -ENV DEBIAN_FRONTEND=noninteractive - -RUN sed -i 's|http://archive.ubuntu.com/ubuntu/|https://mirrors.tuna.tsinghua.edu.cn/ubuntu/|g' /etc/apt/sources.list && \ - sed -i 's|http://security.ubuntu.com/ubuntu/|https://mirrors.tuna.tsinghua.edu.cn/ubuntu/|g' /etc/apt/sources.list - -RUN apt-get update && \ - apt-get install -y vim tmux zip unzip wget git cmake build-essential software-properties-common curl libibverbs-dev ca-certificates iproute2 ffmpeg libsm6 libxext6 && \ - add-apt-repository ppa:deadsnakes/ppa && \ - apt-get update && \ - apt-get install -y python3.11 python3.11-venv python3.11-dev python3-pip && \ - apt-get clean && rm -rf /var/lib/apt/lists/* - -RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1 && \ - update-alternatives --install /usr/bin/python python /usr/bin/python3.11 1 - -RUN python -m pip install --upgrade pip - -RUN pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple - -RUN pip install packaging ninja opencv-python - -# download torch and torchvision wheels into whls filefold -COPY whls/*.whl /app/ - -RUN pip install --no-cache-dir torch-*.whl torchvision*.whl - -WORKDIR /workspace - -# download flash-attention source code -COPY flash-attention /workspace/flash-attention - -RUN cd flash-attention && pip install --no-cache-dir -v -e . - -# download fast-hadamard-transform source code -COPY fast-hadamard-transform /workspace/fast-hadamard-transform - -RUN cd fast-hadamard-transform && pip install --no-cache-dir -v -e . - -COPY requirements/runtime.txt /app/ - -RUN pip install --no-cache-dir -r /app/runtime.txt - -RUN rm -rf /app diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 000000000..12e606318 --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 EleutherAI + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md index 16a47d5a5..e973cdb7e 100644 --- a/README.md +++ b/README.md @@ -1,270 +1,497 @@ -
- LightCompress: Towards Accurate and Efficient AIGC Model Compression
+# Language Model Evaluation Harness -llmc +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.10256836.svg)](https://doi.org/10.5281/zenodo.10256836) -llmc +--- -[![License](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) -[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/ModelTC/LightCompress) -[![arXiv](https://img.shields.io/badge/LLMC-2405.06001-b31b1b)](https://arxiv.org/abs/2405.06001) -[![arXiv](https://img.shields.io/badge/LLMC+-2508.09981-b31b1b)](https://arxiv.org/abs/2508.09981) -[![Discord Banner](https://img.shields.io/discord/1139835312592392214?logo=discord&logoColor=white)](https://discord.com/invite/NfJzbkK3jY) -[![QQ](https://img.shields.io/badge/QQ-EB1923?logo=tencent-qq&logoColor=white)](http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=I9IGPWWj8uuRXWH3_ELWjouf6gkIMgUl&authKey=GA3WbFAsm90ePJf%2FCbc7ZyXXq4ShQktlBaLxgqS5yuSPAsr3%2BDKMRdosUiLYoilO&noverify=0&group_code=526192592) -[![Doc](https://img.shields.io/badge/docs-English-99cc2)](https://llmc-en.readthedocs.io/en/latest/) -[![Doc](https://img.shields.io/badge/文档-中文-99cc2)](https://llmc-zhcn.readthedocs.io/en/latest/)  +*Latest News 📣* -**\[ English | [中文](README_zh.md) \]** +- [2024/07] [API model](docs/API_guide.md) support has been updated and refactored, introducing support for batched and async requests, and making it significantly easier to customize and use for your own purposes. **To run Llama 405B, we recommend using VLLM's OpenAI-compliant API to host the model, and use the `local-completions` model type to evaluate the model.** +- [2024/07] New Open LLM Leaderboard tasks have been added ! You can find them under the [leaderboard](lm_eval/tasks/leaderboard/README.md) task group. -
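For the Llama 405B workflow mentioned in the news above (hosting the model behind vLLM's OpenAI-compatible API and pointing the harness at it with the `local-completions` model type), a minimal sketch might look like the following. The server invocation, port, and model name are illustrative assumptions rather than part of this diff; depending on your vLLM version, `vllm serve` may be the equivalent entry point.

```bash
# Serve the model with vLLM's OpenAI-compatible API (model name, port, and GPU count are placeholders)
python -m vllm.entrypoints.openai.api_server \
    --model meta-llama/Meta-Llama-3.1-405B-Instruct \
    --tensor-parallel-size 8 \
    --port 8000

# Point the harness at that server using the local-completions model type
lm_eval --model local-completions \
    --tasks hellaswag \
    --model_args model=meta-llama/Meta-Llama-3.1-405B-Instruct,base_url=http://localhost:8000/v1/completions,num_concurrent=1,tokenized_requests=False \
    --batch_size 16
```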
+--- -> **📢 Notice**: This repository was formerly known as **LLMC** and has been renamed to **LightCompress**. +## Announcement +**A new v0.4.0 release of lm-evaluation-harness is available** ! -**LightCompress** is an off-the-shell tool designed for compressing aigc models(LLM, VLM, Diffusion ...), leveraging state-of-the-art compression algorithms to enhance efficiency and reduce model size without compromising performance. You can download the Docker image that can run LightCompress with the following command. Users in mainland China are recommended to use Alibaba Cloud Docker. +New updates and features include: -```shell -# docker hub: https://hub.docker.com/r/llmcompression/llmc -docker pull llmcompression/llmc:pure-latest +- **New Open LLM Leaderboard tasks have been added ! You can find them under the [leaderboard](lm_eval/tasks/leaderboard/README.md) task group.** +- Internal refactoring +- Config-based task creation and configuration +- Easier import and sharing of externally-defined task config YAMLs +- Support for Jinja2 prompt design, easy modification of prompts + prompt imports from Promptsource +- More advanced configuration options, including output post-processing, answer extraction, and multiple LM generations per document, configurable fewshot settings, and more +- Speedups and new modeling libraries supported, including: faster data-parallel HF model usage, vLLM support, MPS support with HuggingFace, and more +- Logging and usability changes +- New tasks including CoT BIG-Bench-Hard, Belebele, user-defined task groupings, and more -# aliyun docker: registry.cn-hangzhou.aliyuncs.com/yongyang/llmcompression:[tag] -docker pull registry.cn-hangzhou.aliyuncs.com/yongyang/llmcompression:pure-latest +Please see our updated documentation pages in `docs/` for more details. + +Development will be continuing on the `main` branch, and we encourage you to give us feedback on what features are desired and how to improve the library further, or ask questions, either in issues or PRs on GitHub, or in the [EleutherAI discord](https://discord.gg/eleutherai)! + +--- + +## Overview + +This project provides a unified framework to test generative language models on a large number of different evaluation tasks. + +**Features:** +- Over 60 standard academic benchmarks for LLMs, with hundreds of subtasks and variants implemented. +- Support for models loaded via [transformers](https://github.com/huggingface/transformers/) (including quantization via [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ)), [GPT-NeoX](https://github.com/EleutherAI/gpt-neox), and [Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed/), with a flexible tokenization-agnostic interface. +- Support for fast and memory-efficient inference with [vLLM](https://github.com/vllm-project/vllm). +- Support for commercial APIs including [OpenAI](https://openai.com), and [TextSynth](https://textsynth.com/). +- Support for evaluation on adapters (e.g. LoRA) supported in [HuggingFace's PEFT library](https://github.com/huggingface/peft). +- Support for local models and benchmarks. +- Evaluation with publicly available prompts ensures reproducibility and comparability between papers. +- Easy support for custom prompts and evaluation metrics. 
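One item in the feature list above that does not get its own example later in this README is adapter evaluation via PEFT. A hedged sketch of scoring a LoRA adapter stacked on its base model follows; the `peft=` model argument is how the `hf` backend is commonly pointed at an adapter, but treat it and the adapter path as assumptions to verify against `docs/interface.md` rather than guarantees of this diff.

```bash
# Evaluate a LoRA/PEFT adapter on top of its base model (the adapter repo is a placeholder)
lm_eval --model hf \
    --model_args pretrained=EleutherAI/pythia-160m,peft=your-org/your-lora-adapter \
    --tasks lambada_openai \
    --device cuda:0 \
    --batch_size 8
```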
+ +The Language Model Evaluation Harness is the backend for 🤗 Hugging Face's popular [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard), has been used in [hundreds of papers](https://scholar.google.com/scholar?oi=bibs&hl=en&authuser=2&cites=15052937328817631261,4097184744846514103,1520777361382155671,17476825572045927382,18443729326628441434,14801318227356878622,7890865700763267262,12854182577605049984,15641002901115500560,5104500764547628290), and is used internally by dozens of organizations including NVIDIA, Cohere, BigScience, BigCode, Nous Research, and Mosaic ML. + +## Install + +To install the `lm-eval` package from the github repository, run: + +```bash +git clone https://github.com/EleutherAI/lm-evaluation-harness +cd lm-evaluation-harness +pip install -e . ``` -**Community**: [Discord Server](https://discord.com/invite/NfJzbkK3jY), [Tencent QQ Group](http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=I9IGPWWj8uuRXWH3_ELWjouf6gkIMgUl&authKey=GA3WbFAsm90ePJf%2FCbc7ZyXXq4ShQktlBaLxgqS5yuSPAsr3%2BDKMRdosUiLYoilO&noverify=0&group_code=526192592). +We also provide a number of optional dependencies for extended functionality. A detailed table is available at the end of this document. -**Docs**: [English](https://llmc-en.readthedocs.io/en/latest/), [Chinese](https://llmc-zhcn.readthedocs.io/en/latest/). +## Basic Usage +### User Guide -## :fire: Latest News +A user guide detailing the full list of supported arguments is provided [here](./docs/interface.md), and on the terminal by calling `lm_eval -h`. Alternatively, you can use `lm-eval` instead of `lm_eval`. -- **August 13, 2025:** 🚀 We have open-sourced our compression solution for **vision-language models (VLMs)**, supporting over a total of **20 algorithms** that cover both **token reduction** and **quantization**. This release enables flexible, plug-and-play compression strategies for a wide range of multimodal tasks. please refer to the [documentation](https://llmc-en.readthedocs.io/en/latest/advanced/token_reduction.html). +A list of supported tasks (or groupings of tasks) can be viewed with `lm-eval --tasks list`. Task descriptions and links to corresponding subfolders are provided [here](./lm_eval/tasks/README.md). -- **May 12, 2025:** 🔥 We now fully support quantization for the **`Wan2.1`** series of video generation models and provide export of truly quantized **INT8/FP8** weights, compatible with the [lightx2v](https://github.com/ModelTC/lightx2v) inference framework. For details, please refer to the [lightx2v documentation](https://llmc-en.readthedocs.io/en/latest/backend/lightx2v.html). +### Hugging Face `transformers` -- **Feb 07, 2025:** 🔥 We now fully support quantization of large-scale **`MOE`** models like **`DeepSeekv3`**, **`DeepSeek-R1`**, and **`DeepSeek-R1-zero`** with **`671B`** parameters. You can now directly load FP8 weights without any extra conversion. AWQ and RTN quantization can run on a single 80GB GPU, and we also support the export of true quantized **INT4/INT8** weights. +To evaluate a model hosted on the [HuggingFace Hub](https://huggingface.co/models) (e.g. GPT-J-6B) on `hellaswag` you can use the following command (this assumes you are using a CUDA-compatible GPU): -- **Nov 20, 2024:** 🔥 We now fully support the quantization of ✨`DeepSeekv2(2.5)` and other `MOE` models, as well as ✨`Qwen2VL`, `Llama3.2`, and other `VLM` models. 
Supported quantization methods include ✅integer quantization, ✅floating-point quantization, and advanced algorithms like ✅AWQ, ✅GPTQ, ✅SmoothQuant, and ✅Quarot. +```bash +lm_eval --model hf \ + --model_args pretrained=EleutherAI/gpt-j-6B \ + --tasks hellaswag \ + --device cuda:0 \ + --batch_size 8 +``` -- **Nov 12, 2024:** 🔥 We have added support for 💥`static per-tensor activation quantization` across various models and algorithms, covering ✅integer quantization and ✅floating-point quantization to further optimize performance and efficiency. Additionally, we now support exporting ✨`real quantized models` and using the [VLLM](https://github.com/vllm-project/vllm) and [SGLang](https://github.com/sgl-project/sglang) backends for inference acceleration. For more details, refer to the [VLLM documentation](https://llmc-en.readthedocs.io/en/latest/backend/vllm.html) and [SGLang documentation](https://llmc-en.readthedocs.io/en/latest/backend/sglang.html). +Additional arguments can be provided to the model constructor using the `--model_args` flag. Most notably, this supports the common practice of using the `revisions` feature on the Hub to store partially trained checkpoints, or to specify the datatype for running a model: -- **Sep 26, 2024:** 🔥 We now support exporting 💥`FP8 quantized(E4M3, E5M2)` models from 🚀`LLMC` to advanced inference backends such as [VLLM](https://github.com/vllm-project/vllm) and [SGLang](https://github.com/sgl-project/sglang). For detailed usage, please refer to the [VLLM documentation](https://llmc-en.readthedocs.io/en/latest/backend/vllm.html) and [SGLang documentation](https://llmc-en.readthedocs.io/en/latest/backend/sglang.html). +```bash +lm_eval --model hf \ + --model_args pretrained=EleutherAI/pythia-160m,revision=step100000,dtype="float" \ + --tasks lambada_openai,hellaswag \ + --device cuda:0 \ + --batch_size 8 +``` -
-Previous News +Models that are loaded via both `transformers.AutoModelForCausalLM` (autoregressive, decoder-only GPT style models) and `transformers.AutoModelForSeq2SeqLM` (such as encoder-decoder models like T5) in Huggingface are supported. -- **Sep 24, 2024:** 🔥 We have officially released ✅INT4 and ✅INT8 models of ✨`Llama-3.1-405B`, quantized using 🚀`LLMC` in `save_lightllm` mode. You can download the model parameters [here](https://huggingface.co/Dongz/llama31-405b-quant). +Batch size selection can be automated by setting the ```--batch_size``` flag to ```auto```. This will perform automatic detection of the largest batch size that will fit on your device. On tasks where there is a large difference between the longest and shortest example, it can be helpful to periodically recompute the largest batch size, to gain a further speedup. To do this, append ```:N``` to above flag to automatically recompute the largest batch size ```N``` times. For example, to recompute the batch size 4 times, the command would be: -- **Sep 23, 2024:** 🔥 We now support exporting ✨`real quantized(INT4, INT8)` models from 🚀`LLMC` to advanced inference backends such as [VLLM](https://github.com/vllm-project/vllm), [SGLang](https://github.com/sgl-project/sglang), [AutoAWQ](https://github.com/casper-hansen/AutoAWQ), and [MLC-LLM](https://github.com/mlc-ai/mlc-llm) for quantized inference deployment, enabling ✨`reduced memory usage` and ✨`faster inference speeds`. - For detailed usage, please refer to the [VLLM documentation](https://llmc-en.readthedocs.io/en/latest/backend/vllm.html), [SGLang documentation](https://llmc-en.readthedocs.io/en/latest/backend/sglang.html), [AutoAWQ documentation](https://llmc-en.readthedocs.io/en/latest/backend/autoawq.html), and [MLC-LLM documentation](https://llmc-en.readthedocs.io/en/latest/backend/mlcllm.html). +```bash +lm_eval --model hf \ + --model_args pretrained=EleutherAI/pythia-160m,revision=step100000,dtype="float" \ + --tasks lambada_openai,hellaswag \ + --device cuda:0 \ + --batch_size auto:4 +``` -- **Sep 09, 2024:** 🔥 We provide some configs of our best practice towards superior performance (see Best Practice [here](https://llmc-en.readthedocs.io/en/latest/)). +> [!Note] +> Just like you can provide a local path to `transformers.AutoModel`, you can also provide a local path to `lm_eval` via `--model_args pretrained=/path/to/model` -* **Sep 03, 2024:** 🔥 We support [opencompass](https://github.com/open-compass/opencompass) 🤗 to eval 🚀`LLMC` model. Follow this [doc](https://llmc-en.readthedocs.io/en/latest/advanced/model_test_v2.html) and have a try! +#### Multi-GPU Evaluation with Hugging Face `accelerate` -* **Aug 22, 2024:** 🔥We support lots of small language models, including current SOTA [SmolLM](https://huggingface.co/collections/HuggingFaceTB/smollm-6695016cad7167254ce15966)(see [Supported Model List](#supported-model-list)). +We support three main ways of using Hugging Face's [accelerate 🚀](https://github.com/huggingface/accelerate) library for multi-GPU evaluation. -* **Aug 22, 2024:** 🔥 Additionally, we also support down stream task evaluation through our modified [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness) 🤗. Specifically, people can first employ `save_trans` mode(see `save` part in [Configuration](https://llmc-en.readthedocs.io/en/latest/configs.html)) to save a weight modified model. After obtaining the transformed model, they can directly evaluate the quantized model referring to [run_lm_eval.sh](scripts/run_lm_eval.sh). 
More details can be found in [here](https://llmc-en.readthedocs.io/en/latest/advanced/model_test_v1.html). +To perform *data-parallel evaluation* (where each GPU loads a **separate full copy** of the model), we leverage the `accelerate` launcher as follows: -* **Jul 23, 2024:** 🍺🍺🍺 We release a brand new version benchmark paper: +``` +accelerate launch -m lm_eval --model hf \ + --tasks lambada_openai,arc_easy \ + --batch_size 16 +``` +(or via `accelerate launch --no-python lm_eval`). - [**LLMC: Benchmarking Large Language Model Quantization with a Versatile Compression Toolkit**](https://arxiv.org/abs/2405.06001v2). +For cases where your model can fit on a single GPU, this allows you to evaluate on K GPUs K times faster than on one. - [Ruihao Gong\*](https://xhplus.github.io/), [Yang Yong\*](https://github.com/helloyongyang), [Shiqiao Gu\*](https://github.com/gushiqiao), [Yushi Huang\*](https://github.com/Harahan), [Chengtao Lv](https://scholar.google.com/citations?user=r8vseSUAAAAJ&hl=en), [Yunchen Zhang](https://scholar.google.com/citations?user=glkWFyUAAAAJ&hl=en), [Xianglong Liu📧](https://xlliu-beihang.github.io/), [Dacheng Tao](https://scholar.google.com/citations?user=RwlJNLcAAAAJ&hl=en) +**WARNING**: This setup does not work with FSDP model sharding, so in `accelerate config` FSDP must be disabled, or the NO_SHARD FSDP option must be used. - (\* denotes equal contribution, 📧 denotes corresponding author.) +The second way of using `accelerate` for multi-GPU evaluation is when your model is *too large to fit on a single GPU.* -- **Jul 16, 2024:** 🔥We support Wanda/Naive(Magnitude) for llm sparsification and layer-wise mix bits quantization now! +In this setting, run the library *outside the `accelerate` launcher*, but passing `parallelize=True` to `--model_args` as follows: -- **Jul 14, 2024:** 🔥We support rotation based quantization QuaRot now! +``` +lm_eval --model hf \ + --tasks lambada_openai,arc_easy \ + --model_args parallelize=True \ + --batch_size 16 +``` -- **May 17, 2024:** 🚀 We support some advanced large models, e.g., LLaVA, Mixtral, LLaMA V3 and Qwen V2 now. Have a try! +This means that your model's weights will be split across all available GPUs. -- **May 13, 2024:** 🍺🍺🍺 We release our quantization benchmark paper: +For more advanced users or even larger models, we allow for the following arguments when `parallelize=True` as well: +- `device_map_option`: How to split model weights across available GPUs. defaults to "auto". +- `max_memory_per_gpu`: the max GPU memory to use per GPU in loading the model. +- `max_cpu_memory`: the max amount of CPU memory to use when offloading the model weights to RAM. +- `offload_folder`: a folder where model weights will be offloaded to disk if needed. - [**LLM-QBench: A Benchmark Towards the Best Practice for Post-training Quantization of Large Language Models**](https://arxiv.org/abs/2405.06001). +The third option is to use both at the same time. This will allow you to take advantage of both data parallelism and model sharding, and is especially useful for models that are too large to fit on a single GPU. 
- [Ruihao Gong\*](https://xhplus.github.io/), [Yang Yong\*](https://github.com/helloyongyang), [Shiqiao Gu\*](https://github.com/gushiqiao), [Yushi Huang\*](https://github.com/Harahan), [Yunchen Zhang](https://scholar.google.com/citations?user=glkWFyUAAAAJ&hl=en), [Xianglong Liu📧](https://xlliu-beihang.github.io/), [Dacheng Tao](https://scholar.google.com/citations?user=RwlJNLcAAAAJ&hl=en) +``` +accelerate launch --multi_gpu --num_processes {nb_of_copies_of_your_model} \ + -m lm_eval --model hf \ + --tasks lambada_openai,arc_easy \ + --model_args parallelize=True \ + --batch_size 16 +``` - (\* denotes equal contribution, 📧 denotes corresponding author.) +To learn more about model parallelism and how to use it with the `accelerate` library, see the [accelerate documentation](https://huggingface.co/docs/transformers/v4.15.0/en/parallelism) -
- comp -
+**Warning: We do not natively support multi-node evaluation using the `hf` model type! Please reference [our GPT-NeoX library integration](https://github.com/EleutherAI/gpt-neox/blob/main/eval.py) for an example of code in which a custom multi-machine evaluation script is written.** - We modularly and fairly benchmark the quantization techniques considering calibration cost, inference efficiency, and quantized accuracy. Near 600 experiments on diverse models and datasets provide three insightful takeaways - on the calibration data, algorithm pipeline, and quantization configuration selection. Based on the takeaways, a best practice for the LLM PTQ pipeline is designed, to achieve the best accuracy and efficiency performance balance - under various scenarios. +**Note: we do not currently support multi-node evaluations natively, and advise using either an externally hosted server to run inference requests against, or creating a custom integration with your distributed framework [as is done for the GPT-NeoX library](https://github.com/EleutherAI/gpt-neox/blob/main/eval_tasks/eval_adapter.py).** -- **Mar 07, 2024:** 🚀 We release the quantization part of a powerful and efficient LLM compression tool. Notably, our benchmark paper is coming soon😊. +### NVIDIA `nemo` models -
+[NVIDIA NeMo Framework](https://github.com/NVIDIA/NeMo) is a generative AI framework built for researchers and pytorch developers working on language models. -## 🚀 Highlight Feature +To evaluate a `nemo` model, start by installing NeMo following [the documentation](https://github.com/NVIDIA/NeMo?tab=readme-ov-file#installation). We highly recommended to use the NVIDIA PyTorch or NeMo container, especially if having issues installing Apex or any other dependencies (see [latest released containers](https://github.com/NVIDIA/NeMo/releases)). Please also install the lm evaluation harness library following the instructions in [the Install section](https://github.com/EleutherAI/lm-evaluation-harness/tree/main?tab=readme-ov-file#install). -- 💥**Comprehensive Algorithm Support**: Provides a broad range of ✨`SOTA compression algorithms`, including ✅quantization, ✅mixed-precision quantization, and ✅sparsity, while maintaining accuracy consistent with the original repositories. ✨`Quantization best practices` (see 🚀`Best Practices` [here](https://llmc-en.readthedocs.io/en/latest/)) are also available to ensure optimal performance and efficiency. +NeMo models can be obtained through [NVIDIA NGC Catalog](https://catalog.ngc.nvidia.com/models) or in [NVIDIA's Hugging Face page](https://huggingface.co/nvidia). In [NVIDIA NeMo Framework](https://github.com/NVIDIA/NeMo/tree/main/scripts/nlp_language_modeling) there are conversion scripts to convert the `hf` checkpoints of popular models like llama, falcon, mixtral or mpt to `nemo`. -- 💥**Supported Formats**: Supports both ✨`quantization` (integer and floating-point) and ✨`sparsity`, specifically including ✅weight-activation, ✅weight-only, ✅mixed-precision quantization, as well as ✅structured and ✅unstructured sparsity. +Run a `nemo` model on one GPU: +```bash +lm_eval --model nemo_lm \ + --model_args path= \ + --tasks hellaswag \ + --batch_size 32 +``` -- 💥**Wide Model Support**: Offers support for a diverse array of ✨`LLM models`, including ✅LLama, ✅Mistral, ✅InternLM2, ✅Qwen2, among others, as well as ✅MOE(DeepSeekv2, Deepseek-R1) and ✅VLM(Llama3.2-vision, Qwen2-vl) models (see [Supported Model List](#supported-model-list)). +It is recommended to unpack the `nemo` model to avoid the unpacking inside the docker container - it may overflow disk space. For that you can run: -- 💥**Multi-backend Compatibility**: Seamlessly integrates with various backends for enhanced deployment flexibility. Multiple quantization settings and model formats are compatible with a wide range of backends and hardware platforms, such as ✅VLLM, ✅Sglang, ✅LightLLM, ✅MLC-LLM, and ✅AutoAWQ, making it highly versatile(see Section `Backend` [here](https://llmc-en.readthedocs.io/en/latest/)). +``` +mkdir MY_MODEL +tar -xvf MY_MODEL.nemo -c MY_MODEL +``` -- 💥**Performance Efficiency**: Enables quantization of large LLMs, such as ✨`Llama3.1-405B` and ✨`DeepSeek-R1-671B`, with PPL evaluation on a `single A100/H100/H800 GPU`. +#### Multi-GPU evaluation with NVIDIA `nemo` models -## ⚙️ Usage +By default, only one GPU is used. But we do support either data replication or tensor/pipeline parallelism during evaluation, on one node. -Please refer to the 🚀`Quick Start` section in the [documentation](https://llmc-en.readthedocs.io/en/latest/). +1) To enable data replication, set the `model_args` of `devices` to the number of data replicas to run. 
For example, the command to run 8 data replicas over 8 GPUs is: +```bash +torchrun --nproc-per-node=8 --no-python lm_eval \ + --model nemo_lm \ + --model_args path=,devices=8 \ + --tasks hellaswag \ + --batch_size 32 +``` -## :robot: Supported Model List +2) To enable tensor and/or pipeline parallelism, set the `model_args` of `tensor_model_parallel_size` and/or `pipeline_model_parallel_size`. In addition, you also have to set up `devices` to be equal to the product of `tensor_model_parallel_size` and/or `pipeline_model_parallel_size`. For example, the command to use one node of 4 GPUs with tensor parallelism of 2 and pipeline parallelism of 2 is: +```bash +torchrun --nproc-per-node=4 --no-python lm_eval \ + --model nemo_lm \ + --model_args path=,devices=4,tensor_model_parallel_size=2,pipeline_model_parallel_size=2 \ + --tasks hellaswag \ + --batch_size 32 +``` +Note that it is recommended to substitute the `python` command by `torchrun --nproc-per-node= --no-python` to facilitate loading the model into the GPUs. This is especially important for large checkpoints loaded into multiple GPUs. -- ✅ [BLOOM](https://huggingface.co/bigscience/bloom) -- ✅ [LLaMA](https://github.com/facebookresearch/llama) -- ✅ [LLaMA V2](https://huggingface.co/meta-llama) -- ✅ [StarCoder](https://github.com/bigcode-project/starcoder) -- ✅ [OPT](https://huggingface.co/docs/transformers/model_doc/opt) +Not supported yet: multi-node evaluation and combinations of data replication with tensor or pipeline parallelism. -
-More Supported Models  +### Tensor + Data Parallel and Optimized Inference with `vLLM` -- ✅ [Falcon](https://huggingface.co/docs/transformers/model_doc/falcon) -- ✅ [InternLM2](https://huggingface.co/internlm) -- ✅ [Mistral](https://huggingface.co/docs/transformers/model_doc/mistral) -- ✅ [LLaMA V3](https://huggingface.co/meta-llama) -- ✅ [Mixtral](https://huggingface.co/docs/transformers/model_doc/mixtral) -- ✅ [Qwen V2](https://github.com/QwenLM/Qwen2) -- ✅ [LLaVA](https://github.com/haotian-liu/LLaVA) -- ✅ [InternLM2.5](https://huggingface.co/internlm) -- ✅ [StableLM](https://github.com/Stability-AI/StableLM) -- ✅ [Gemma2](https://huggingface.co/docs/transformers/main/en/model_doc/gemma2) -- ✅ [Phi2](https://huggingface.co/microsoft/phi-2) -- ✅ [Phi 1.5](https://huggingface.co/microsoft/phi-1_5) -- ✅ [MiniCPM](https://github.com/OpenBMB/MiniCPM) -- ✅ [SmolLM](https://huggingface.co/collections/HuggingFaceTB/smollm-6695016cad7167254ce15966) -- ✅ [DeepSeekv2.5](https://huggingface.co/deepseek-ai/DeepSeek-V2.5) -- ✅ [LLaMA V3.2 Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision) -- ✅ [Qwen MOE](https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B) -- ✅ [Qwen2-VL](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct) -- ✅ [InternVL2](https://huggingface.co/OpenGVLab/InternVL2-2B) +We also support vLLM for faster inference on [supported model types](https://docs.vllm.ai/en/latest/models/supported_models.html), especially faster when splitting a model across multiple GPUs. For single-GPU or multi-GPU — tensor parallel, data parallel, or a combination of both — inference, for example: -
+```bash +lm_eval --model vllm \ + --model_args pretrained={model_name},tensor_parallel_size={GPUs_per_model},dtype=auto,gpu_memory_utilization=0.8,data_parallel_size={model_replicas} \ + --tasks lambada_openai \ + --batch_size auto +``` +To use vllm, do `pip install lm_eval[vllm]`. For a full list of supported vLLM configurations, please reference our [vLLM integration](https://github.com/EleutherAI/lm-evaluation-harness/blob/e74ec966556253fbe3d8ecba9de675c77c075bce/lm_eval/models/vllm_causallms.py) and the vLLM documentation. -You can add your own model type referring to files under `llmc/models/*.py`. +vLLM occasionally differs in output from Huggingface. We treat Huggingface as the reference implementation, and provide a [script](./scripts/model_comparator.py) for checking the validity of vllm results against HF. -## :bus: Supported Backend List +> [!Tip] +> For fastest performance, we recommend using `--batch_size auto` for vLLM whenever possible, to leverage its continuous batching functionality! -- ✅ [VLLM](https://github.com/vllm-project/vllm) -- ✅ [LightLLM](https://github.com/ModelTC/lightllm) -- ✅ [Sglang](https://github.com/sgl-project/sglang) -- ✅ [MLC-LLM](https://github.com/mlc-ai/mlc-llm) -- ✅ [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) +> [!Tip] +> Passing `max_model_len=4096` or some other reasonable default to vLLM through model args may cause speedups or prevent out-of-memory errors when trying to use auto batch size, such as for Mistral-7B-v0.1 which defaults to a maximum length of 32k. -## 💡 Supported Algorithm List +### Model APIs and Inference Servers -### Token Reduction +Our library also supports the evaluation of models served via several commercial APIs, and we hope to implement support for the most commonly used performant local/self-hosted inference servers. -- ✅ [ToMe](https://arxiv.org/abs/2210.09461) -- ✅ [FastV](https://arxiv.org/abs/2403.06764) -- ✅ [SparseVLM](https://arxiv.org/abs/2410.04417) -- ✅ [VisionZip](https://arxiv.org/abs/2412.04467) +To call a hosted model, use: -
-More Supported Algorithms  +```bash +export OPENAI_API_KEY=YOUR_KEY_HERE +lm_eval --model openai-completions \ + --model_args model=davinci \ + --tasks lambada_openai,hellaswag +``` -- ✅ [PyramidDrop](https://arxiv.org/abs/2410.17247) -- ✅ [VisPruner](https://arxiv.org/abs/2412.01818) -- ✅ [MustDrop](https://arxiv.org/abs/2411.10803) -- ✅ [DART](https://arxiv.org/abs/2502.11494) -- ✅ [DyCoke](https://arxiv.org/abs/2411.15024) -- ✅ [PruneVid](https://arxiv.org/abs/2412.16117) -- ✅ [FastVID](https://arxiv.org/abs/2503.11187) -- ✅ [HoliTom](https://arxiv.org/abs/2505.21334) +We also support using your own local inference server with servers that mirror the OpenAI Completions and ChatCompletions APIs. -
+```bash +lm_eval --model local-completions --tasks gsm8k --model_args model=facebook/opt-125m,base_url=http://{yourip}:8000/v1/completions,num_concurrent=1,max_retries=3,tokenized_requests=False,batch_size=16 +``` +Note that for externally hosted models, configs such as `--device` which relate to where to place a local model should not be used and do not function. Just like you can use `--model_args` to pass arbitrary arguments to the model constructor for local models, you can use it to pass arbitrary arguments to the model API for hosted models. See the documentation of the hosting service for information on what arguments they support. + +| API or Inference Server | Implemented? | `--model ` name | Models supported: | Request Types: | +|---------------------------------------------------------------------------------------------------------------------------|---------------------------------|-----------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------| +| OpenAI Completions | :heavy_check_mark: | `openai-completions`, `local-completions` | All OpenAI Completions API models | `generate_until`, `loglikelihood`, `loglikelihood_rolling` | +| OpenAI ChatCompletions | :heavy_check_mark: | `openai-chat-completions`, `local-chat-completions` | [All ChatCompletions API models](https://platform.openai.com/docs/guides/gpt) | `generate_until` (no logprobs) | +| Anthropic | :heavy_check_mark: | `anthropic` | [Supported Anthropic Engines](https://docs.anthropic.com/claude/reference/selecting-a-model) | `generate_until` (no logprobs) | +| Anthropic Chat | :heavy_check_mark: | `anthropic-chat`, `anthropic-chat-completions` | [Supported Anthropic Engines](https://docs.anthropic.com/claude/docs/models-overview) | `generate_until` (no logprobs) | +| Textsynth | :heavy_check_mark: | `textsynth` | [All supported engines](https://textsynth.com/documentation.html#engines) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` | +| Cohere | [:hourglass: - blocked on Cohere API bug](https://github.com/EleutherAI/lm-evaluation-harness/pull/395) | N/A | [All `cohere.generate()` engines](https://docs.cohere.com/docs/models) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` | +| [Llama.cpp](https://github.com/ggerganov/llama.cpp) (via [llama-cpp-python](https://github.com/abetlen/llama-cpp-python)) | :heavy_check_mark: | `gguf`, `ggml` | [All models supported by llama.cpp](https://github.com/ggerganov/llama.cpp) | `generate_until`, `loglikelihood`, (perplexity evaluation not yet implemented) | +| vLLM | :heavy_check_mark: | `vllm` | [Most HF Causal Language Models](https://docs.vllm.ai/en/latest/models/supported_models.html) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` | +| Mamba | :heavy_check_mark: | `mamba_ssm` | [Mamba architecture Language Models via the `mamba_ssm` package](https://huggingface.co/state-spaces) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` | +| Huggingface Optimum (Causal LMs) | ✔️ | `openvino` | Any decoder-only AutoModelForCausalLM converted with Huggingface Optimum into OpenVINO™ Intermediate Representation (IR) format | `generate_until`, 
`loglikelihood`, `loglikelihood_rolling` | +| Neuron via AWS Inf2 (Causal LMs) | ✔️ | `neuronx` | Any decoder-only AutoModelForCausalLM supported to run on [huggingface-ami image for inferentia2](https://aws.amazon.com/marketplace/pp/prodview-gr3e6yiscria2) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` | +| [Neural Magic DeepSparse](https://github.com/neuralmagic/deepsparse) | ✔️ | `deepsparse` | Any LM from [SparseZoo](https://sparsezoo.neuralmagic.com/) or on [HF Hub with the "deepsparse" tag](https://huggingface.co/models?other=deepsparse) | `generate_until`, `loglikelihood` | +| [Neural Magic SparseML](https://github.com/neuralmagic/sparseml) | ✔️ | `sparseml` | Any decoder-only AutoModelForCausalLM from [SparseZoo](https://sparsezoo.neuralmagic.com/) or on [HF Hub](https://huggingface.co/neuralmagic). Especially useful for models with quantization like [`zoo:llama2-7b-gsm8k_llama2_pretrain-pruned60_quantized`](https://sparsezoo.neuralmagic.com/models/llama2-7b-gsm8k_llama2_pretrain-pruned60_quantized) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` | +| Your local inference server! | :heavy_check_mark: | `local-completions` or `local-chat-completions` | Support for OpenAI API-compatible servers, with easy customization for other APIs. | `generate_until`, `loglikelihood`, `loglikelihood_rolling` | + +Models which do not supply logits or logprobs can be used with tasks of type `generate_until` only, while local models, or APIs that supply logprobs/logits of their prompts, can be run on all task types: `generate_until`, `loglikelihood`, `loglikelihood_rolling`, and `multiple_choice`. + +For more information on the different task `output_types` and model request types, see [our documentation](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/model_guide.md#interface). + +> [!Note] +> For best performance with closed chat model APIs such as Anthropic Claude 3 and GPT-4, we recommend carefully looking at a few sample outputs using `--limit 10` first to confirm answer extraction and scoring on generative tasks is performing as expected. Providing `system="<some system prompt>"` within `--model_args` for anthropic-chat-completions, to instruct the model what format to respond in, may be useful. + + +### Other Frameworks + +A number of other libraries contain scripts for calling the eval harness through their library. These include [GPT-NeoX](https://github.com/EleutherAI/gpt-neox/blob/main/eval_tasks/eval_adapter.py), [Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed/blob/main/examples/MoE/readme_evalharness.md), and [mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/blob/master/eval_harness.py). + +To create your own custom integration, you can follow the instructions in [this tutorial](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/interface.md#external-library-usage). + +### Additional Features +> [!Note] +> For tasks unsuitable for direct evaluation — either due to risks associated with executing untrusted code or complexities in the evaluation process — the `--predict_only` flag is available to obtain decoded generations for post-hoc evaluation. + +If you have a Metal-compatible Mac, you can run the eval harness using the MPS back-end by replacing `--device cuda:0` with `--device mps` (requires PyTorch version 2.1 or higher). **Note that the PyTorch MPS backend is still in early stages of development, so correctness issues or unsupported operations may exist. 
If you observe oddities in model performance on the MPS back-end, we recommend first checking that a forward pass of your model on `--device cpu` and `--device mps` match.** + +> [!Note] +> You can inspect what the LM inputs look like by running the following command: +> ```bash +> python write_out.py \ +> --tasks <task1,task2,...> \ +> --num_fewshot 5 \ +> --num_examples 10 \ +> --output_base_path /path/to/output/folder +> ``` +> This will write out one text file for each task. + +To verify the data integrity of the tasks you're performing in addition to running the tasks themselves, you can use the `--check_integrity` flag: + +```bash +lm_eval --model openai \ + --model_args engine=davinci \ + --tasks lambada_openai,hellaswag \ + --check_integrity +``` -### Quantization +## Advanced Usage Tips -- ✅ Naive -- ✅ [AWQ](https://arxiv.org/abs/2306.00978) -- ✅ [GPTQ](https://arxiv.org/abs/2210.17323) -- ✅ [SmoothQuant](https://arxiv.org/abs/2211.10438) -- ✅ [OS+](https://arxiv.org/abs/2304.09145) +For models loaded with the HuggingFace `transformers` library, any arguments provided via `--model_args` get passed to the relevant constructor directly. This means that anything you can do with `AutoModel` can be done with our library. For example, you can pass a local path via `pretrained=` or use models finetuned with [PEFT](https://github.com/huggingface/peft) by taking the call you would run to evaluate the base model and adding `,peft=PATH` to the `model_args` argument: +```bash +lm_eval --model hf \ + --model_args pretrained=EleutherAI/gpt-j-6b,parallelize=True,load_in_4bit=True,peft=nomic-ai/gpt4all-j-lora \ + --tasks openbookqa,arc_easy,winogrande,hellaswag,arc_challenge,piqa,boolq \ + --device cuda:0 +``` -
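As a complement to the PEFT example above, here is a minimal sketch of the "pass a local path via `pretrained=`" case mentioned in that paragraph; the checkpoint path is a placeholder, and every flag shown is one already used elsewhere in this README:

```bash
# Evaluate a locally stored HF-format checkpoint (path is illustrative).
lm_eval --model hf \
    --model_args pretrained=/path/to/local/checkpoint \
    --tasks hellaswag \
    --device cuda:0 \
    --batch_size 8
```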
-More Supported Algorithms  +Models provided as delta weights can be easily loaded using the Hugging Face transformers library. Within `--model_args`, set the `delta` argument to specify the delta weights, and use the `pretrained` argument to designate the base model to which they will be applied: +```bash +lm_eval --model hf \ + --model_args pretrained=Ejafa/llama_7B,delta=lmsys/vicuna-7b-delta-v1.1 \ + --tasks hellaswag +``` -- ✅ [OmniQuant](https://arxiv.org/abs/2308.13137) -- ✅ [NormTweaking](https://arxiv.org/abs/2309.02784) -- ✅ [AdaDim](https://arxiv.org/pdf/2309.15531.pdf) -- ✅ [QUIK](https://arxiv.org/abs/2310.09259) -- ✅ [SpQR](https://arxiv.org/abs/2306.03078) -- ✅ [DGQ](https://arxiv.org/abs/2310.04836) -- ✅ [OWQ](https://arxiv.org/abs/2306.02272) -- ✅ [LLM.int8()](https://arxiv.org/abs/2208.07339) -- ✅ [HQQ](https://mobiusml.github.io/hqq_blog/) -- ✅ [QuaRot](https://arxiv.org/abs/2404.00456) -- ✅ [SpinQuant](https://arxiv.org/abs/2405.16406) **([See this branch](https://github.com/ModelTC/llmc/tree/dev_spinquant))** -- ✅ [TesseraQ](https://arxiv.org/abs/2410.19103) +[GPTQ](https://github.com/PanQiWei/AutoGPTQ) quantized models can be loaded by specifying their file names in `,autogptq=NAME` (or `,autogptq=True` for default names) in the `model_args` argument: -
+```bash +lm_eval --model hf \ + --model_args pretrained=model-name-or-path,autogptq=model.safetensors,gptq_use_triton=True \ + --tasks hellaswag +``` -### Pruning +We support wildcards in task names, for example, you can run all of the machine-translated lambada tasks via `--tasks lambada_openai_mt_*` (see the sketch below). -- ✅ Naive(Magnitude) -- ✅ [Wanda](https://arxiv.org/abs/2306.11695) -- ✅ [ShortGPT](https://arxiv.org/abs/2403.03853) - -## 🤝 Acknowledgments - -We develop our code referring to the following repos: - -- [mit-han-lab/llm-awq](https://github.com/mit-han-lab/llm-awq) -- [mit-han-lab/smoothquant](https://github.com/mit-han-lab/smoothquant) -- [OpenGVLab/OmniQuant](https://github.com/OpenGVLab/OmniQuant) -- [IST-DASLab/gptq](https://github.com/IST-DASLab/gptq) -- [ModelTC/Outlier_Suppression_Plus](https://github.com/ModelTC/Outlier_Suppression_Plus) - -
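To make the wildcard usage above concrete, here is a minimal sketch; the model choice is arbitrary, and the quotes simply keep the shell from expanding the `*` before the harness sees it:

```bash
# Run every machine-translated lambada variant with a single wildcard pattern.
lm_eval --model hf \
    --model_args pretrained=EleutherAI/gpt-j-6b \
    --tasks "lambada_openai_mt_*" \
    --device cuda:0 \
    --batch_size 8
```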
-More Related Implementations  - -- [IST-DASLab/QUIK](https://github.com/IST-DASLab/QUIK) -- [Vahe1994/SpQR](https://github.com/Vahe1994/SpQR) -- [ilur98/DGQ](https://github.com/ilur98/DGQ) -- [xvyaward/owq](https://github.com/xvyaward/owq) -- [TimDettmers/bitsandbytes](https://github.com/TimDettmers/bitsandbytes) -- [mobiusml/hqq](https://github.com/mobiusml/hqq) -- [spcl/QuaRot](https://github.com/spcl/QuaRot) -- [locuslab/wanda](https://github.com/locuslab/wanda) -- [EleutherAI/lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness) -- [facebookresearch/SpinQuant](https://github.com/facebookresearch/SpinQuant) -- [Intelligent-Computing-Lab-Yale/TesseraQ](https://github.com/Intelligent-Computing-Lab-Yale/TesseraQ) +## Saving Results -
+To save evaluation results provide an `--output_path`. We also support logging model responses with the `--log_samples` flag for post-hoc analysis. -## 🌟 Star History +Additionally, one can provide a directory with `--use_cache` to cache the results of prior runs. This allows you to avoid repeated execution of the same (model, task) pairs for re-scoring. -[![Star History Chart](https://api.star-history.com/svg?repos=ModelTC/llmc&type=Timeline)](https://star-history.com/#ModelTC/llmc&Timeline) +To push results and samples to the Hugging Face Hub, first ensure an access token with write access is set in the `HF_TOKEN` environment variable. Then, use the `--hf_hub_log_args` flag to specify the organization, repository name, repository visibility, and whether to push results and samples to the Hub - [example dataset on the HF Hub](https://huggingface.co/datasets/KonradSzafer/lm-eval-results-demo). For instance: -## ✏️ Citation +```bash +lm_eval --model hf \ + --model_args pretrained=model-name-or-path,autogptq=model.safetensors,gptq_use_triton=True \ + --tasks hellaswag \ + --log_samples \ + --output_path results \ + --hf_hub_log_args hub_results_org=EleutherAI,hub_repo_name=lm-eval-results,push_results_to_hub=True,push_samples_to_hub=True,public_repo=False \ +``` -If you find our toolkit or research paper useful or relevant to your research, please kindly cite our work: +This allows you to easily download the results and samples from the Hub, using: +```python +from datasets import load_dataset +load_dataset("EleutherAI/lm-eval-results-private", "hellaswag", "latest") ``` -@article{lv2025llmc+, - title={LLMC+: Benchmarking Vision-Language Model Compression with a Plug-and-play Toolkit}, - author={Lv, Chengtao and Zhang, Bilang and Yong, Yang and Gong, Ruihao and Huang, Yushi and Gu, Shiqiao and Wu, Jiajun and Shi, Yumeng and Guo, Jinyang and Wang, Wenya}, - journal={arXiv preprint arXiv:2508.09981}, - year={2025} -} + +For a full list of supported arguments, check out the [interface](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/interface.md) guide in our documentation! + +## Visualizing Results + +You can seamlessly visualize and analyze the results of your evaluation harness runs using both Weights & Biases (W&B) and Zeno. + +### Zeno + +You can use [Zeno](https://zenoml.com) to visualize the results of your eval harness runs. + +First, head to [hub.zenoml.com](https://hub.zenoml.com) to create an account and get an API key [on your account page](https://hub.zenoml.com/account). +Add this key as an environment variable: + +```bash +export ZENO_API_KEY=[your api key] +``` + +You'll also need to install the `lm_eval[zeno]` package extra. + +To visualize the results, run the eval harness with the `log_samples` and `output_path` flags. +We expect `output_path` to contain multiple folders that represent individual model names. +You can thus run your evaluation on any number of tasks and models and upload all of the results as projects on Zeno. + +```bash +lm_eval \ + --model hf \ + --model_args pretrained=EleutherAI/gpt-j-6B \ + --tasks hellaswag \ + --device cuda:0 \ + --batch_size 8 \ + --log_samples \ + --output_path output/gpt-j-6B +``` + +Then, you can upload the resulting data using the `zeno_visualize` script: + +```bash +python scripts/zeno_visualize.py \ + --data_path output \ + --project_name "Eleuther Project" +``` + +This will use all subfolders in `data_path` as different models and upload all tasks within these model folders to Zeno. 
+If you run the eval harness on multiple tasks, the `project_name` will be used as a prefix and one project will be created per task. + +You can find an example of this workflow in [examples/visualize-zeno.ipynb](examples/visualize-zeno.ipynb). + +### Weights and Biases + +With the [Weights and Biases](https://wandb.ai/site) integration, you can now spend more time extracting deeper insights into your evaluation results. The integration is designed to streamline the process of logging and visualizing experiment results using the Weights & Biases (W&B) platform. + +The integration provides functionality to + +- automatically log the evaluation results, +- log the samples as W&B Tables for easy visualization, +- log the `results.json` file as an artifact for version control, +- log the `<task_name>_eval_samples.json` file if the samples are logged, +- generate a comprehensive report for analysis and visualization with all the important metrics, +- log task and CLI specific configs, +- and more out of the box, such as the command used to run the evaluation, GPU/CPU counts, timestamp, etc. + +First, you'll need to install the `lm_eval[wandb]` package extra. Do `pip install lm_eval[wandb]`. + +Authenticate your machine with your unique W&B token. Visit https://wandb.ai/authorize to get one. Then do `wandb login` in your command line terminal. + +Run the eval harness as usual with the `--wandb_args` flag. Use this flag to provide arguments for initializing a wandb run ([wandb.init](https://docs.wandb.ai/ref/python/init)) as comma-separated string arguments. + +```bash +lm_eval \ + --model hf \ + --model_args pretrained=microsoft/phi-2,trust_remote_code=True \ + --tasks hellaswag,mmlu_abstract_algebra \ + --device cuda:0 \ + --batch_size 8 \ + --output_path output/phi-2 \ + --limit 10 \ + --wandb_args project=lm-eval-harness-integration \ + --log_samples +``` +In the stdout, you will find the link to the W&B run page as well as a link to the generated report. You can find an example of this workflow in [examples/visualize-wandb.ipynb](examples/visualize-wandb.ipynb), and an example of how to integrate it beyond the CLI. + +## How to Contribute or Learn More? + +For more information on the library and how everything fits together, check out all of our [documentation pages](https://github.com/EleutherAI/lm-evaluation-harness/tree/main/docs)! We plan to post a larger roadmap of desired + planned library improvements soon, with more information on how contributors can help. + +### Implementing new tasks + +To implement a new task in the eval harness, see [this guide](./docs/new_task_guide.md). + +In general, we follow this priority list for addressing concerns about prompting and other eval details: +1. If there is widespread agreement among people who train LLMs, use the agreed-upon procedure. +2. If there is a clear and unambiguous official implementation, use that procedure. +3. If there is widespread agreement among people who evaluate LLMs, use the agreed-upon procedure. +4. If there are multiple common implementations but no universal or widespread agreement, use our preferred option among the common implementations. As before, prioritize choosing from among the implementations found in LLM training papers. + +These are guidelines and not rules, and can be overruled in special circumstances. + +We try to prioritize agreement with the procedures used by other groups to decrease the harm when people inevitably compare runs across different papers, despite our discouragement of the practice. 
Historically, we also prioritized the implementation from [Language Models are Few Shot Learners](https://arxiv.org/abs/2005.14165) as our original goal was specifically to compare results with that paper. + +### Support + +The best way to get support is to open an issue on this repo or join the [EleutherAI Discord server](https://discord.gg/eleutherai). The `#lm-thunderdome` channel is dedicated to developing this project and the `#release-discussion` channel is for receiving support for our releases. If you've used the library and have had a positive (or negative) experience, we'd love to hear from you! + +## Optional Extras +Extras dependencies can be installed via `pip install -e ".[NAME]"` + +| Name | Use | +|-----------------|----------------------------------------------| +| api | For using api models (Anthropic, OpenAI API) | +| deepsparse | For running NM's DeepSparse models | +| dev | For linting PRs and contributions | +| gptq | For loading models with GPTQ | +| hf_transfer | For speeding up HF Hub file downloads | +| ifeval | For running the IFEval task | +| neuronx | For running on AWS inf2 instances | +| mamba | For loading Mamba SSM models | +| math | For running math task answer checking | +| multilingual | For multilingual tokenizers | +| optimum | For running Intel OpenVINO models | +| promptsource | For using PromptSource prompts | +| sentencepiece | For using the sentencepiece tokenizer | +| sparseml | For using NM's SparseML models | +| testing | For running library test suite | +| vllm | For loading models with vLLM | +| zeno | For visualizing results with Zeno | +| --------------- | --------------------------------------- | +| all | Loads all extras (not recommended) | + +## Cite as + ``` -@inproceedings{DBLP:conf/emnlp/GongYGHLZT024, - author={Ruihao Gong and Yang Yong and Shiqiao Gu and Yushi Huang and Chengtao Lv and Yunchen Zhang and Dacheng Tao and Xianglong Liu}, - title={LLMC: Benchmarking Large Language Model Quantization with a Versatile Compression Toolkit}, - year={2024}, - cdate={1704067200000}, - pages={132-152}, - url={https://aclanthology.org/2024.emnlp-industry.12}, - booktitle={EMNLP (Industry Track)}, - crossref={conf/emnlp/2024i} +@misc{eval-harness, + author = {Gao, Leo and Tow, Jonathan and Abbasi, Baber and Biderman, Stella and Black, Sid and DiPofi, Anthony and Foster, Charles and Golding, Laurence and Hsu, Jeffrey and Le Noac'h, Alain and Li, Haonan and McDonell, Kyle and Muennighoff, Niklas and Ociepa, Chris and Phang, Jason and Reynolds, Laria and Schoelkopf, Hailey and Skowron, Aviya and Sutawika, Lintang and Tang, Eric and Thite, Anish and Wang, Ben and Wang, Kevin and Zou, Andy}, + title = {A framework for few-shot language model evaluation}, + month = 07, + year = 2024, + publisher = {Zenodo}, + version = {v0.4.3}, + doi = {10.5281/zenodo.12608602}, + url = {https://zenodo.org/records/12608602} } ``` diff --git a/README_zh.md b/README_zh.md deleted file mode 100644 index d67ee5f32..000000000 --- a/README_zh.md +++ /dev/null @@ -1,233 +0,0 @@ -
-

LightCompress:迈向准确且高效的AIGC大模型压缩

- -llmc - -[![License](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) -[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/ModelTC/LightCompress) -[![arXiv](https://img.shields.io/badge/LLMC-2405.06001-b31b1b)](https://arxiv.org/abs/2405.06001) -[![Discord Banner](https://img.shields.io/discord/1139835312592392214?logo=discord&logoColor=white)](https://discord.com/invite/NfJzbkK3jY) -[![QQ](https://img.shields.io/badge/QQ-EB1923?logo=tencent-qq&logoColor=white)](http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=I9IGPWWj8uuRXWH3_ELWjouf6gkIMgUl&authKey=GA3WbFAsm90ePJf%2FCbc7ZyXXq4ShQktlBaLxgqS5yuSPAsr3%2BDKMRdosUiLYoilO&noverify=0&group_code=526192592) -[![Doc](https://img.shields.io/badge/docs-English-99cc2)](https://llmc-en.readthedocs.io/en/latest/) -[![Doc](https://img.shields.io/badge/文档-中文-99cc2)](https://llmc-zhcn.readthedocs.io/en/latest/)  - -**\[ [English](README.md) | 中文 \]** - -
- -> **📢 提示**: 本仓库原名为 **LLMC**,现已更名为 **LightCompress**。 - -**LightCompress** 是一个开箱即用的工具,专为压缩AIGC大模型(LLM, VLM, Diffusion ...)设计,利用最先进的压缩算法提高效率并减少模型体积,同时不影响预测精度。你可以通过以下命令下载可以运行LightCompress的docker镜像,中国大陆用户推荐使用阿里云docker。 - -```shell -# Docker Hub: https://hub.docker.com/r/llmcompression/llmc -docker pull llmcompression/llmc:pure-latest - -# 阿里云镜像: registry.cn-hangzhou.aliyuncs.com/yongyang/llmcompression:[tag] -docker pull registry.cn-hangzhou.aliyuncs.com/yongyang/llmcompression:pure-latest -``` - -**社区**: [Discord 服务器](https://discord.com/invite/NfJzbkK3jY)、[腾讯 QQ 群](http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=I9IGPWWj8uuRXWH3_ELWjouf6gkIMgUl&authKey=GA3WbFAsm90ePJf%2FCbc7ZyXXq4ShQktlBaLxgqS5yuSPAsr3%2BDKMRdosUiLYoilO&noverify=0&group_code=526192592)。 - -**文档**: [English](https://llmc-en.readthedocs.io/en/latest/)、[中文](https://llmc-zhcn.readthedocs.io/en/latest/)。 - -## :fire: 最新动态 - -- **2025年8月13日:** 🚀 我们已开源针对 **视觉语言模型(VLMs)** 的压缩方案,支持共计超过 **20 种算法**,涵盖 **token reduction** 和 **quantization**。此次发布为多模态任务提供了灵活、即插即用的压缩策略。具体请参阅[文档](https://llmc-zhcn.readthedocs.io/en/latest/advanced/token_reduction.html)。 - -- **2025年5月12日:** 🔥 我们现已全面支持 **`Wan2.1`** 系列视频生成模型的量化,并支持导出真实量化的 **INT8/FP8** 权重,兼容 [lightx2v](https://github.com/ModelTC/lightx2v) 推理框架。详情请参考 [lightx2v 使用文档](https://llmc-zhcn.readthedocs.io/en/latest/backend/lightx2v.html)。 - -- **2025年2月7日:** 🔥 我们现已全面支持 **`DeepSeekv3`**、**`DeepSeek-R1`** 和 **`DeepSeek-R1-zero`** 等 671B 大规模 **`MOE`** 模型的量化。 您可以直接加载 `FP8` 权重,无需额外转换,使用单张 80G 显存的 GPU 即可运行 `AWQ` 和 `RTN` 量化,同时还支持导出真实量化的 **INT4/INT8** 权重 - -- **2024年11月20日:** 🔥 我们现已全面支持✨`DeepSeekv2(2.5)`等`MOE`模型以及✨`Qwen2VL`、`Llama3.2`等`VLM`模型的量化。支持的量化方案包括✅整型量化、✅浮点量化,以及✅AWQ、✅GPTQ、✅SmoothQuant 和 ✅Quarot 等先进算法。 - -- **2024年11月12日:** 🔥 我们新增对各种模型和算法的💥`激活静态 per-tensor量化`支持,涵盖✅整型量化和✅浮点量化,进一步优化性能和效率。同时支持导出`✨真实量化模型`,并使用 [VLLM](https://github.com/vllm-project/vllm)和[SGLang](https://github.com/sgl-project/sglang)后端进行推理加速,具体请参阅[VLLM文档](https://llmc-zhcn.readthedocs.io/en/latest/backend/vllm.html)和[SGLang文档](https://llmc-zhcn.readthedocs.io/en/latest/backend/sglang.html)。 - -- **2024年9月26日:** 🔥 我们现在支持从🚀 `LLMC`导出💥 `FP8 量化(E4M3,E5M2)`模型到一些先进的推理后端,例如[VLLM](https://github.com/vllm-project/vllm)和[SGLang](https://github.com/sgl-project/sglang)。关于详细使用方法,请参阅[VLLM文档](https://llmc-zhcn.readthedocs.io/en/latest/backend/vllm.html)和[SGLang文档](https://llmc-zhcn.readthedocs.io/en/latest/backend/sglang.html)。 - -
-更早动态 - -- **2024年9月24日:** 🔥 我们正式发布了 ✨`Llama-3.1-405B` 的 ✅INT4 和 ✅INT8 模型,这些模型通过 🚀`LLMC` 使用 `save_lightllm` 模式进行量化。你可以在[此处](https://huggingface.co/Dongz/llama31-405b-quant)下载模型参数。 - -- **2024年9月23日:** 🔥 我们现在支持从 🚀`LLMC` 导出 ✨`真正量化的(INT4, INT8)` 模型到先进推理后端,例如 [VLLM](https://github.com/vllm-project/vllm), [SGLang](https://github.com/sgl-project/sglang), [AutoAWQ](https://github.com/casper-hansen/AutoAWQ), 和 [MLC-LLM](https://github.com/mlc-ai/mlc-llm) 用于量化推理部署,从而实现 ✨`减少内存使用` 和 ✨`加快推理速度`。 - 详细使用方法,请参考 [VLLM 文档](https://llmc-zhcn.readthedocs.io/en/latest/backend/vllm.html)、[SGLang 文档](https://llmc-zhcn.readthedocs.io/en/latest/backend/sglang.html)、[AutoAWQ 文档](https://llmc-zhcn.readthedocs.io/en/latest/backend/autoawq.html) 和 [MLC-LLM 文档](https://llmc-zhcn.readthedocs.io/en/latest/backend/mlcllm.html)。 - -- **2024年9月9日:** 🔥 我们提供了一些最佳实践配置,帮助提升性能(参见最佳实践[此处](https://llmc-zhcn.readthedocs.io/en/latest/))。 - -- **2024年9月3日:** 🔥 我们支持通过[opencompass](https://github.com/open-compass/opencompass) 评估 🚀`LLMC` 模型。请参考此[文档](https://llmc-zhcn.readthedocs.io/en/latest/advanced/model_test_v2.html)试用! - -- **2024年8月22日:** 🔥我们支持许多小型语言模型,包括当前SOTA的 [SmolLM](https://huggingface.co/collections/HuggingFaceTB/smollm-6695016cad7167254ce15966)(参见[支持的模型列表](#supported-model-list))。 - -- **2024年8月22日:** 🔥此外,我们还支持通过我们修改的 [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness) 进行下游任务评估 🤗。具体操作,用户可以先采用 `save_trans` 模式(参见 [配置](https://llmc-zhcn.readthedocs.io/en/latest/configs.html) 中的 `save` 部分)保存权重修改后的模型。在获得转换模型后,可以直接参考 [run_lm_eval.sh](scripts/run_lm_eval.sh) 对量化模型进行评估。更多细节请见[此处](https://llmc-zhcn.readthedocs.io/en/latest/advanced/model_test_v1.html)。 - -- **2024年7月23日:** 🍺🍺🍺 我们发布了全新的基准论文: - - [**LLMC: Benchmarking Large Language Model Quantization with a Versatile Compression Toolkit**](https://arxiv.org/abs/2405.06001v2)。 - - [Ruihao Gong\*](https://xhplus.github.io/), [Yang Yong\*](https://github.com/helloyongyang), [Shiqiao Gu\*](https://github.com/gushiqiao), [Yushi Huang\*](https://github.com/Harahan), [Chengtao Lv](https://scholar.google.com/citations?user=r8vseSUAAAAJ&hl=en), [Yunchen Zhang](https://scholar.google.com/citations?user=glkWFyUAAAAJ&hl=en), [Xianglong Liu📧](https://xlliu-beihang.github.io/), [Dacheng Tao](https://scholar.google.com/citations?user=RwlJNLcAAAAJ&hl=en) - - (\* 表示同等贡献,📧 表示通讯作者。) - -- **2024年7月16日:** 🔥我们现在支持 Wanda/Naive(幅度)进行 LLM 稀疏化和逐层混合比特量化! - -- **2024年7月14日:** 🔥我们现在支持基于旋转的量化 QuaRot! - -- **2024年5月17日:** 🚀 我们现在支持一些先进的大型模型,例如 LLaVA、Mixtral、LLaMA V3 和 Qwen V2。快来试试吧! - -- **2024年5月13日:** 🍺🍺🍺 我们发布了量化基准论文: - - [**LLM-QBench: A Benchmark Towards the Best Practice for Post-training Quantization of Large Language Models**](https://arxiv.org/abs/2405.06001)。 - - [Ruihao Gong\*](https://xhplus.github.io/), [Yang Yong\*](https://github.com/helloyongyang), [Shiqiao Gu\*](https://github.com/gushiqiao), [Yushi Huang\*](https://github.com/Harahan), [Yunchen Zhang](https://scholar.google.com/citations?user=glkWFyUAAAAJ&hl=en), [Xianglong Liu📧](https://xlliu-beihang.github.io/), [Dacheng Tao](https://scholar.google.com/citations?user=RwlJNLcAAAAJ&hl=en) - - (\* 表示同等贡献,📧 表示通讯作者。) - -
- comp -
- - 我们模块化且公平地基准测试了量化技术,考虑了校准成本、推理效率和量化准确性。在多种模型和数据集上进行了近600次实验,得出了三个关于校准数据、算法管道和量化配置选择的有见地的结论。基于这些结论,设计了一种LLM后训练量化管道的最佳实践,以在各种场景下实现最佳的准确性和效率平衡。 - -- **2024年3月7日:** 🚀 我们发布了一个功能强大且高效的LLM压缩工具的量化部分。值得注意的是,我们的基准论文即将发布😊。 - -
- -## 🚀 亮点功能 - -- 💥**综合算法支持**: 提供广泛的 ✨`SOTA压缩算法` 支持,包括 ✅量化、✅混合精度量化 和 ✅稀疏化,同时保持与原始仓库一致的精度。我们还提供 ✨`量化最佳实践`(参见✨`最佳实践` 章节[此处](https://llmc-zhcn.readthedocs.io/en/latest/)),确保最佳性能和效率。 - -- 💥**支持的格式**: 支持 ✨`量化`(整型和浮点)和 ✨`稀疏化`,具体包括 ✅权重激活量化、✅权重量化、✅混合精度量化,以及 ✅结构化 和 ✅非结构化稀疏化。 - -- 💥**广泛模型支持**: 支持多种 ✨`LLM模型`,包括 ✅LLama、✅Mistral、✅InternLM2、✅Qwen2 等,以及 ✅MOE(DeepSeekv3, Deepseek-R1) 和 ✅VLM(Llama3.2-vision, Qwen2-vl) 模型(参见[支持的模型列表](#supported-model-list))。 - -- 💥**多后端兼容性**: 无缝集成多个后端,增强部署灵活性。多种量化设置和模型格式兼容广泛的后端和硬件平台,例如 ✅VLLM、✅Sglang、✅LightLLM、✅MLC-LLM 和 ✅AutoAWQ,使其高度灵活(参见✨`推理后端` 章节 [此处](https://llmc-zhcn.readthedocs.io/en/latest/))。 - -- 💥**性能效率**: 支持大规模LLM的量化,例如 ✨`Llama3.1-405B` 和 ✨`DeepSeek-R1-671B`,并可在 `单个 A100/H100/H800 GPU` 上评估 PPL。 - -## ⚙️ 快速上手 - -请参阅 🚀`快速入门`章节[此处](https://llmc-zhcn.readthedocs.io/en/latest/)。 - -## :robot: 支持的模型 - -- ✅ [BLOOM](https://huggingface.co/bigscience/bloom) -- ✅ [LLaMA](https://github.com/facebookresearch/llama) -- ✅ [LLaMA V2](https://huggingface.co/meta-llama) -- ✅ [StarCoder](https://github.com/bigcode-project/starcoder) -- ✅ [OPT](https://huggingface.co/docs/transformers/model_doc/opt) - -
-更多模型 - -- ✅ [Falcon](https://huggingface.co/docs/transformers/model_doc/falcon) -- ✅ [InternLM2](https://huggingface.co/internlm) -- ✅ [Mistral](https://huggingface.co/docs/transformers/model_doc/mistral) -- ✅ [LLaMA V3](https://huggingface.co/meta-llama) -- ✅ [Mixtral](https://huggingface.co/docs/transformers/model_doc/mixtral) -- ✅ [Qwen V2](https://github.com/QwenLM/Qwen2) -- ✅ [LLaVA](https://github.com/haotian-liu/LLaVA) -- ✅ [InternLM2.5](https://huggingface.co/internlm) -- ✅ [StableLM](https://github.com/Stability-AI/StableLM) -- ✅ [Gemma2](https://huggingface.co/docs/transformers/main/en/model_doc/gemma2) -- ✅ [Phi2](https://huggingface.co/microsoft/phi-2) -- ✅ [Phi 1.5](https://huggingface.co/microsoft/phi-1_5) -- ✅ [MiniCPM](https://github.com/OpenBMB/MiniCPM) -- ✅ [SmolLM](https://huggingface.co/collections/HuggingFaceTB/smollm-6695016cad7167254ce15966) -- ✅ [DeepSeekv2.5](https://huggingface.co/deepseek-ai/DeepSeek-V2.5) -- ✅ [LLaMA V3.2 Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision) -- ✅ [Qwen MOE](https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B) -- ✅ [Qwen2-VL](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct) -- ✅ [InternVL2](https://huggingface.co/OpenGVLab/InternVL2-2B) - -
- -您可参考 `llmc/models/*.py` 添加自定义模型。 - -## :bus: 支持的后端 - -- ✅ [VLLM](https://github.com/vllm-project/vllm) -- ✅ [LightLLM](https://github.com/ModelTC/lightllm) -- ✅ [Sglang](https://github.com/sgl-project/sglang) -- ✅ [MLC-LLM](https://github.com/mlc-ai/mlc-llm) -- ✅ [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) - -## 💡 支持的算法 - -### 量化 - -- ✅ Naive -- ✅ [AWQ](https://arxiv.org/abs/2306.00978) -- ✅ [GPTQ](https://arxiv.org/abs/2210.17323) -- ✅ [SmoothQuant](https://arxiv.org/abs/2211.10438) -- ✅ [OS+](https://arxiv.org/abs/2304.09145) - -
-更多算法 - -- ✅ [OmniQuant](https://arxiv.org/abs/2308.13137) -- ✅ [NormTweaking](https://arxiv.org/abs/2309.02784) -- ✅ [AdaDim](https://arxiv.org/pdf/2309.15531.pdf) -- ✅ [QUIK](https://arxiv.org/abs/2310.09259) -- ✅ [SpQR](https://arxiv.org/abs/2306.03078) -- ✅ [DGQ](https://arxiv.org/abs/2310.04836) -- ✅ [OWQ](https://arxiv.org/abs/2306.02272) -- ✅ [LLM.int8()](https://arxiv.org/abs/2208.07339) -- ✅ [HQQ](https://mobiusml.github.io/hqq_blog/) -- ✅ [QuaRot](https://arxiv.org/abs/2404.00456) -- ✅ [SpinQuant](https://arxiv.org/abs/2405.16406) **([见此分支](https://github.com/ModelTC/llmc/tree/dev_spinquant))** -- ✅ [TesseraQ](https://arxiv.org/abs/2410.19103) - -
- -### 剪枝 - -- ✅ Naive(Magnitude) -- ✅ [Wanda](https://arxiv.org/abs/2306.11695) -- ✅ [ShortGPT](https://arxiv.org/abs/2403.03853) - -## 🤝 致谢 - -本项目参考了以下仓库: - -- [mit-han-lab/llm-awq](https://github.com/mit-han-lab/llm-awq) -- [mit-han-lab/smoothquant](https://github.com/mit-han-lab/smoothquant) -- [OpenGVLab/OmniQuant](https://github.com/OpenGVLab/OmniQuant) -- [IST-DASLab/gptq](https://github.com/IST-DASLab/gptq) -- [ModelTC/Outlier_Suppression_Plus](https://github.com/ModelTC/Outlier_Suppression_Plus) - -
-更多相关实现 - -- [IST-DASLab/QUIK](https://github.com/IST-DASLab/QUIK) -- [Vahe1994/SpQR](https://github.com/Vahe1994/SpQR) -- [ilur98/DGQ](https://github.com/ilur98/DGQ) -- [xvyaward/owq](https://github.com/xvyaward/owq) -- [TimDettmers/bitsandbytes](https://github.com/TimDettmers/bitsandbytes) -- [mobiusml/hqq](https://github.com/mobiusml/hqq) -- [spcl/QuaRot](https://github.com/spcl/QuaRot) -- [locuslab/wanda](https://github.com/locuslab/wanda) -- [EleutherAI/lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness) -- [facebookresearch/SpinQuant](https://github.com/facebookresearch/SpinQuant) -- [Intelligent-Computing-Lab-Yale/TesseraQ](https://github.com/Intelligent-Computing-Lab-Yale/TesseraQ) - -
- -## 🌟 Star 历史 - -[![Star History Chart](https://api.star-history.com/svg?repos=ModelTC/llmc&type=Timeline)](https://star-history.com/#ModelTC/llmc&Timeline) - -## ✏️ 引用 - -如果您觉得本工具包或相关论文对您的研究有帮助,请引用: - -``` -@inproceedings{DBLP:conf/emnlp/GongYGHLZT024, - author = {Ruihao Gong and Yang Yong and Shiqiao Gu and Yushi Huang and Chengtao Lv and Yunchen Zhang and Dacheng Tao and Xianglong Liu}, - title = {LLMC: Benchmarking Large Language Model Quantization with a Versatile Compression Toolkit}, - booktitle = {EMNLP (Industry Track)}, - year = {2024}, - pages = {132--152}, - url = {https://aclanthology.org/2024.emnlp-industry.12} -} -``` diff --git a/assets/wan_i2v/calib/astronaut.jpg b/assets/wan_i2v/calib/astronaut.jpg deleted file mode 100644 index b2c8d3aa4..000000000 Binary files a/assets/wan_i2v/calib/astronaut.jpg and /dev/null differ diff --git a/assets/wan_i2v/calib/samples.json b/assets/wan_i2v/calib/samples.json deleted file mode 100755 index b810dfd76..000000000 --- a/assets/wan_i2v/calib/samples.json +++ /dev/null @@ -1,7 +0,0 @@ -[ - { - "image": "astronaut.jpg", - "prompt": "An astronaut hatching from an egg, on the surface of the moon, the darkness and depth of space realised in the background. High quality, ultrarealistic detail and breath-taking movie-like camera shot.", - "negative_prompt": "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards" - } -] diff --git a/assets/wan_i2v/eval/astronaut.jpg b/assets/wan_i2v/eval/astronaut.jpg deleted file mode 100644 index b2c8d3aa4..000000000 Binary files a/assets/wan_i2v/eval/astronaut.jpg and /dev/null differ diff --git a/assets/wan_i2v/eval/samples.json b/assets/wan_i2v/eval/samples.json deleted file mode 100755 index b810dfd76..000000000 --- a/assets/wan_i2v/eval/samples.json +++ /dev/null @@ -1,7 +0,0 @@ -[ - { - "image": "astronaut.jpg", - "prompt": "An astronaut hatching from an egg, on the surface of the moon, the darkness and depth of space realised in the background. 
High quality, ultrarealistic detail and breath-taking movie-like camera shot.", - "negative_prompt": "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards" - } -] diff --git a/assets/wan_t2v/calib/samples.json b/assets/wan_t2v/calib/samples.json deleted file mode 100755 index 62b1b5d36..000000000 --- a/assets/wan_t2v/calib/samples.json +++ /dev/null @@ -1,6 +0,0 @@ -[ - { - "prompt": "A cat walks on the grass, realistic", - "negative_prompt": "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards" - } -] diff --git a/assets/wan_t2v/eval/samples.json b/assets/wan_t2v/eval/samples.json deleted file mode 100755 index 62b1b5d36..000000000 --- a/assets/wan_t2v/eval/samples.json +++ /dev/null @@ -1,6 +0,0 @@ -[ - { - "prompt": "A cat walks on the grass, realistic", - "negative_prompt": "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards" - } -] diff --git a/ci_check/awq_w4a16_fakequant_eval.yml b/ci_check/awq_w4a16_fakequant_eval.yml deleted file mode 100644 index e2baef7c5..000000000 --- a/ci_check/awq_w4a16_fakequant_eval.yml +++ /dev/null @@ -1,38 +0,0 @@ -base: - seed: &seed 42 -model: - type: Opt - path: /home/runner/work/LightCompress/LightCompress/ci_check/opt-125m - torch_dtype: auto -calib: - name: pileval - download: False - path: /home/runner/work/LightCompress/LightCompress/check/datasets/calib/pileval - n_samples: 4 # 128 - bs: -1 - seq_len: 16 # 512 - preproc: pileval_awq - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: /home/runner/work/LightCompress/LightCompress/check/datasets/eval/wikitext2 - bs: 1 - seq_len: 16 # 2048 - eval_token_consist: True -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - special: - trans: True - trans_version: v2 - weight_clip: True - clip_sym: False -save: - save_trans: False - save_path: /home/runner/work/LightCompress/LightCompress/save/opt-125m_awq_w4a16 diff --git a/ci_check/change_files.py b/ci_check/change_files.py deleted file mode 100644 index 6dc4b29af..000000000 --- a/ci_check/change_files.py +++ /dev/null @@ -1,193 +0,0 @@ -import os - -# 文件路径 -cpu_txt_path = "cpu.txt" - - -def modify_file(filepath, modifications): - with open(filepath, "r") as file: - lines = file.readlines() - - # 应用修改 - new_lines = [] - for line in lines: - # 替换操作 - for search, replace in modifications["modifications"]: - if search in line: - line = line.replace(search, replace) - new_lines.append(line) - - # 在文件开头插入新内容 - with open(filepath, "w") as file: 
- file.writelines(modifications["header"] + new_lines) - - -def main(): - with open(cpu_txt_path, "r") as file: - file_paths = file.readlines() - - for file_path in file_paths: - file_path = file_path.strip() - if not file_path: - continue - - if file_path == "../llmc/__main__.py": - modifications = { - "header": [ - 'device_zbl = "cpu"\n', - 'use_cuda = (device_zbl != "cpu")\n', - ], - "modifications": [ - ( - "torch.cuda.empty_cache()", - "if use_cuda: torch.cuda.empty_cache()" - ), - ( - "init_process_group(backend='nccl')", - "init_process_group(backend='gloo')" - ), - ( - "torch.cuda.set_device(int(os.environ['LOCAL_RANK']))", - "# torch.cuda.set_device(int(os.environ['LOCAL_RANK']))" - ) - ], - } - elif file_path == "../llmc/compression/quantization/awq.py": - modifications = { - "header": ["n_grid_zbl = 1\n"], - "modifications": [ - ( - "n_grid = 20", - "n_grid = n_grid_zbl" - ), - ( - "device='cuda'", - "device='cpu'" - ) - ], - } - elif file_path == "../llmc/compression/quantization/gptq.py": - modifications = { - "header": [ - 'device_zbl = "cpu"\n', - 'use_cuda = (device_zbl != "cpu")\n', - ], - "modifications": [ - ( - "torch.cuda.empty_cache()", - "if use_cuda: torch.cuda.empty_cache()" - ), - (".cuda()", ".to(device_zbl)"), - ("torch.device('cuda')", "torch.device('cpu')"), - ( - "torch.cuda.synchronize()", - "if use_cuda: torch.cuda.synchronize()" - ), - ], - } - elif ( - file_path - == "../llmc/compression/quantization/base_blockwise_quantization.py" - ): - modifications = { - "header": [ - 'device_zbl = "cpu"\n', - 'use_cuda = (device_zbl != "cpu")\n', - ], - "modifications": [ - (".cuda()", ".to(device_zbl)"), - ( - "torch.cuda.empty_cache()", - "if use_cuda: torch.cuda.empty_cache()", - ), - ], - } - elif file_path == "../llmc/models/base_model.py": - modifications = { - "header": [ - 'device_zbl = "cpu"\n', - 'use_cuda = (device_zbl != "cpu")\n', - ], - "modifications": [ - (".cuda()", ".to(device_zbl)"), - ( - - "self.move_embed_to_device('cuda')", - "self.move_embed_to_device(device_zbl)", - ), - ], - } - elif file_path == "../llmc/eval/eval_base.py": - modifications = { - "header": [ - 'device_zbl = "cpu"\n', - 'use_cuda = (device_zbl != "cpu")\n', - ], - "modifications": [ - (".cuda()", ".to(device_zbl)"), - ( - "torch.cuda.empty_cache()", - "if use_cuda: torch.cuda.empty_cache()", - ), - ], - } - elif file_path == "../llmc/eval/eval_ppl.py": - modifications = { - "header": [ - 'device_zbl = "cpu"\n', - 'use_cuda = (device_zbl != "cpu")\n', - "nsamples_zbl = 1\n", - ], - "modifications": [ - (".cuda()", ".to(device_zbl)"), - ( - "torch.cuda.empty_cache()", - "if use_cuda: torch.cuda.empty_cache()", - ), - ("nlls = []", "nlls = []; nsamples = nsamples_zbl"), - ], - } - elif file_path == "../llmc/eval/eval_token_consist.py": - modifications = { - "header": [ - 'device_zbl = "cpu"\n', - 'use_cuda = (device_zbl != "cpu")\n', - "nsamples_zbl = 1\n", - ], - "modifications": [ - (".cuda()", ".to(device_zbl)"), - ( - "torch.cuda.empty_cache()", - "if use_cuda: torch.cuda.empty_cache()", - ), - ("for i in range(0, nsamples, bs):", "for i in range(0, 1, 1):"), - ], - } - elif file_path== "../llmc/compression/quantization/auto_clip.py": - modifications = { - "header": [ - 'device_zbl = "cpu"\n', - 'use_cuda = (device_zbl != "cpu")\n', - ], - "modifications": [ - (".cuda()", ".to(device_zbl)"), - ( - "torch.cuda.empty_cache()", - "if use_cuda: torch.cuda.empty_cache()", - ), - ], - } - else: - print(f"File {file_path} not recognized or not specified for modification.") - 
continue - - # 修改文件 - if os.path.exists(file_path): - modify_file(file_path, modifications) - print(f'{file_path} was modefied successfully') - else: - print(f"File {file_path} does not exist.") - - -if __name__ == "__main__": - main() diff --git a/ci_check/cpu.txt b/ci_check/cpu.txt deleted file mode 100644 index 03dd8cda3..000000000 --- a/ci_check/cpu.txt +++ /dev/null @@ -1,9 +0,0 @@ -../llmc/compression/quantization/base_blockwise_quantization.py -../llmc/__main__.py -../llmc/eval/eval_base.py -../llmc/eval/eval_token_consist.py -../llmc/eval/eval_ppl.py -../llmc/compression/quantization/awq.py -../llmc/compression/quantization/gptq.py -../llmc/models/base_model.py -../llmc/compression/quantization/auto_clip.py diff --git a/ci_check/gptq_w_only.yml b/ci_check/gptq_w_only.yml deleted file mode 100644 index 03f64a893..000000000 --- a/ci_check/gptq_w_only.yml +++ /dev/null @@ -1,43 +0,0 @@ -base: - seed: &seed 0 -model: - type: Opt - path: /home/runner/work/LightCompress/LightCompress/ci_check/opt-125m - torch_dtype: auto -calib: - name: wikitext2 - download: False - n_samples: 4 - path: /home/runner/work/LightCompress/LightCompress/check/datasets/eval/wikitext2 - bs: 1 - seq_len: 16 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: /home/runner/work/LightCompress/LightCompress/check/datasets/eval/wikitext2 - bs: 1 - seq_len: 16 - inference_per_block: False - eval_token_consist: False -quant: - method: GPTQ - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - # calib_algo: mse - # mse_b_num: 2 - special: - actorder: True - static_groups: False - percdamp: 0.01 - blocksize: 128 - true_sequential: True - quant_out: True -save: - save_fake: False - save_path: /home/runner/work/LightCompress/LightCompress/save/opt-125m_gptq_w4a16 diff --git a/ci_check/model_urls.txt b/ci_check/model_urls.txt deleted file mode 100644 index fa8ce4e4c..000000000 --- a/ci_check/model_urls.txt +++ /dev/null @@ -1,6 +0,0 @@ -https://hf-mirror.com/facebook/opt-125m/resolve/main/config.json -https://hf-mirror.com/facebook/opt-125m/resolve/main/generation_config.json -https://hf-mirror.com/facebook/opt-125m/resolve/main/merges.txt -https://hf-mirror.com/facebook/opt-125m/resolve/main/special_tokens_map.json -https://hf-mirror.com/facebook/opt-125m/resolve/main/tokenizer_config.json -https://hf-mirror.com/facebook/opt-125m/resolve/main/vocab.json \ No newline at end of file diff --git a/ci_check/run_awq.sh b/ci_check/run_awq.sh deleted file mode 100644 index d5ad5dcb4..000000000 --- a/ci_check/run_awq.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -current_directory=$(pwd) -llmc=$(echo "$current_directory" | sed 's/\/ci_check$//') -export PYTHONPATH=$llmc:$PYTHONPATH - -config=${llmc}/ci_check/awq_w4a16_fakequant_eval.yml - -nnodes=1 -nproc_per_node=1 -MASTER_ADDR=127.0.0.1 -MASTER_PORT=$((10000 + RANDOM % 20000)) - -RANDOM=$(python -c 'import uuid; print(uuid.uuid4())') -task_id=$RANDOM - -cd ../scripts - -torchrun \ - --nnodes $nnodes \ - --nproc_per_node $nproc_per_node \ - --rdzv_id $task_id \ - --rdzv_backend c10d \ - --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ - ${llmc}/llmc/__main__.py --config $config --task_id $task_id \ diff --git a/ci_check/run_gptq.sh b/ci_check/run_gptq.sh deleted file mode 100644 index b13a0c8e3..000000000 --- a/ci_check/run_gptq.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -current_directory=$(pwd) -llmc=$(echo "$current_directory" | sed 's/\/ci_check$//') -export 
PYTHONPATH=$llmc:$PYTHONPATH - -config=${llmc}/ci_check/gptq_w_only.yml - -nnodes=1 -nproc_per_node=1 -MASTER_ADDR=127.0.0.1 -MASTER_PORT=$((10000 + RANDOM % 20000)) - -RANDOM=$(python -c 'import uuid; print(uuid.uuid4())') -task_id=$RANDOM - -cd ../scripts - -torchrun \ - --nnodes $nnodes \ - --nproc_per_node $nproc_per_node \ - --rdzv_id $task_id \ - --rdzv_backend c10d \ - --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ - ${llmc}/llmc/__main__.py --config $config --task_id $task_id \ diff --git a/configs/opencompass/eval_base.py b/configs/opencompass/eval_base.py deleted file mode 100755 index 262bc6bdf..000000000 --- a/configs/opencompass/eval_base.py +++ /dev/null @@ -1,18 +0,0 @@ -from mmengine.config import read_base -from opencompass.models import HuggingFaceBaseModel - - -with read_base(): - from opencompass.configs.datasets.humaneval.humaneval_gen import humaneval_datasets - -datasets = [*humaneval_datasets] - -models = [ - dict( - type=HuggingFaceBaseModel, - abbr='LLMC-OPENCOMPASS', - max_out_len=1024, - batch_size=8, - run_cfg=dict(num_gpus=1), - ) -] diff --git a/configs/opencompass/eval_chat.py b/configs/opencompass/eval_chat.py deleted file mode 100755 index 040a9dd50..000000000 --- a/configs/opencompass/eval_chat.py +++ /dev/null @@ -1,19 +0,0 @@ -from mmengine.config import read_base -from opencompass.models import HuggingFacewithChatTemplate - - -with read_base(): - from opencompass.configs.datasets.humaneval.humaneval_gen import humaneval_datasets - -datasets = [*humaneval_datasets] - -models = [ - dict( - type=HuggingFacewithChatTemplate, - abbr='LLMC-OPENCOMPASS', - max_out_len=1024, - batch_size=8, - run_cfg=dict(num_gpus=1), - stop_words=['<|end_of_text|>', '<|eot_id|>'], - ) -] diff --git a/configs/quantization/backend/autoawq/awq_w4a16.yml b/configs/quantization/backend/autoawq/awq_w4a16.yml deleted file mode 100644 index f5129e494..000000000 --- a/configs/quantization/backend/autoawq/awq_w4a16.yml +++ /dev/null @@ -1,43 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". 
- bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - # Available options: ['gemm_pack', 'gemv_pack'] - pack_version: gemm_pack - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -save: - save_autoawq: True - save_path: /path/to/save_for_autoawq_awq_w4/ diff --git a/configs/quantization/backend/autoawq/gptq_w4a16.yml b/configs/quantization/backend/autoawq/gptq_w4a16.yml deleted file mode 100644 index 130fccb94..000000000 --- a/configs/quantization/backend/autoawq/gptq_w4a16.yml +++ /dev/null @@ -1,45 +0,0 @@ -base: - seed: &seed 0 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: wikitext2 - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: GPTQ - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - # Available options: ['gemm_pack', 'gemv_pack'] - pack_version: gemm_pack - special: - actorder: True - static_groups: True - percdamp: 0.01 - blocksize: 128 - true_sequential: True - quant_out: True -save: - save_autoawq: True - save_path: /path/to/save_for_autoawq_gptq_omni_w4/ diff --git a/configs/quantization/backend/autoawq/rtn_w4a16.yml b/configs/quantization/backend/autoawq/rtn_w4a16.yml deleted file mode 100644 index 83729b72c..000000000 --- a/configs/quantization/backend/autoawq/rtn_w4a16.yml +++ /dev/null @@ -1,27 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -quant: - method: RTN - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - # Available options: ['gemm_pack', 'gemv_pack'] - pack_version: gemm_pack -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - bs: 1 - seq_len: 2048 - inference_per_block: False -save: - save_autoawq: True - save_path: /path/to/save_for_autoawq_rtn_w4/ diff --git a/configs/quantization/backend/autoawq/w4a16_combin/step_1_awq.yml b/configs/quantization/backend/autoawq/w4a16_combin/step_1_awq.yml deleted file mode 100644 index 178a9a1f3..000000000 --- a/configs/quantization/backend/autoawq/w4a16_combin/step_1_awq.yml +++ /dev/null @@ -1,46 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". 
- bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - calib_algo: minmax - special: - trans: True - trans_version: v1 - weight_clip: False - # For weight-only AWQ+Omni mode, there's no need to save scale factors; - # Only the AWQ-transformed model needs to be saved. - save_scale: False - save_clip: False -save: - # Save the AWQ-transformed model. - save_trans: True - save_path: /path/to/save_awq_trans/ diff --git a/configs/quantization/backend/autoawq/w4a16_combin/step_2_omniq.yml b/configs/quantization/backend/autoawq/w4a16_combin/step_2_omniq.yml deleted file mode 100644 index 5a73731c5..000000000 --- a/configs/quantization/backend/autoawq/w4a16_combin/step_2_omniq.yml +++ /dev/null @@ -1,57 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - # Load AWQ-transformed model - path: /path/to/save_awq_trans/transformed_model - torch_dtype: auto -calib: - name: wikitext2 - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: OmniQuant - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - calib_algo: learnable - ste: True - # Available options: ['gemm_pack', 'gemv_pack'] - pack_version: gemm_pack - special: - aug_loss: False - lwc: True - let: False - lwc_lr: 0.01 - let_lr: 0.005 - use_shift: False - alpha: 0.5 - deactive_amp: True - epochs: 5 - wd: 0 - # Use AWQ's search clip factors to initialize OmniQuant's clip factors, - # Then refine them through learning (LWC). - # Only the version v2 clipping method supports LWC. - # This process is automatically handled in OmniQuant's code. - search_clip_init: True - quant_out: True -save: - save_autoawq: True - save_path: /path/to/save_for_autoawq_awq_omni_w4/ diff --git a/configs/quantization/backend/mlcllm/awq_w4a16.yml b/configs/quantization/backend/mlcllm/awq_w4a16.yml deleted file mode 100644 index acba0e3c0..000000000 --- a/configs/quantization/backend/mlcllm/awq_w4a16.yml +++ /dev/null @@ -1,43 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". 
- bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - # Available options: ['gemm_pack'] - pack_version: gemm_pack - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -save: - save_mlcllm: True - save_path: /path/to/save_for_mlcllm_awq_w4/ diff --git a/configs/quantization/backend/mlcllm/gptq_w4a16.yml b/configs/quantization/backend/mlcllm/gptq_w4a16.yml deleted file mode 100644 index 752085736..000000000 --- a/configs/quantization/backend/mlcllm/gptq_w4a16.yml +++ /dev/null @@ -1,43 +0,0 @@ -base: - seed: &seed 0 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: wikitext2 - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - bs: 1 - seq_len: 2048 - inference_per_block: False -quant: - method: GPTQ - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - # Available options: ['gemm_pack'] - pack_version: gemm_pack - special: - actorder: True - static_groups: True - percdamp: 0.01 - blocksize: 128 - true_sequential: True - quant_out: True -save: - save_mlcllm: True - save_path: /path/to/save_for_mlcllm_gptq_w4/ diff --git a/configs/quantization/backend/mlcllm/rtn_w4a16.yml b/configs/quantization/backend/mlcllm/rtn_w4a16.yml deleted file mode 100644 index d7e4d101d..000000000 --- a/configs/quantization/backend/mlcllm/rtn_w4a16.yml +++ /dev/null @@ -1,27 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -quant: - method: RTN - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - # Available options: ['gemm_pack'] - pack_version: gemm_pack -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - bs: 1 - seq_len: 2048 - inference_per_block: False -save: - save_mlcllm: True - save_path: /path/to/save_for_mlcllm_rtn_w4/ diff --git a/configs/quantization/backend/mlcllm/w4a16_combin/step_1_awq.yml b/configs/quantization/backend/mlcllm/w4a16_combin/step_1_awq.yml deleted file mode 100644 index 178a9a1f3..000000000 --- a/configs/quantization/backend/mlcllm/w4a16_combin/step_1_awq.yml +++ /dev/null @@ -1,46 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - calib_algo: minmax - special: - trans: True - trans_version: v1 - weight_clip: False - # For weight-only AWQ+Omni mode, there's no need to save scale factors; - # Only the AWQ-transformed model needs to be saved. - save_scale: False - save_clip: False -save: - # Save the AWQ-transformed model. 
- save_trans: True - save_path: /path/to/save_awq_trans/ diff --git a/configs/quantization/backend/mlcllm/w4a16_combin/step_2_omniq.yml b/configs/quantization/backend/mlcllm/w4a16_combin/step_2_omniq.yml deleted file mode 100644 index 5d1c69757..000000000 --- a/configs/quantization/backend/mlcllm/w4a16_combin/step_2_omniq.yml +++ /dev/null @@ -1,57 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - # Load AWQ-transformed model - path: /path/to/save_awq_trans/transformed_model - torch_dtype: auto -calib: - name: wikitext2 - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: OmniQuant - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - calib_algo: learnable - ste: True - # Available options: ['gemm_pack'] - pack_version: gemm_pack - special: - aug_loss: False - lwc: True - let: False - lwc_lr: 0.01 - let_lr: 0.005 - use_shift: False - alpha: 0.5 - deactive_amp: True - epochs: 5 - wd: 0 - # Use AWQ's search clip factors to initialize OmniQuant's clip factors, - # Then refine them through learning (LWC). - # Only the version v2 clipping method supports LWC. - # This process is automatically handled in OmniQuant's code. - search_clip_init: True - quant_out: True -save: - save_mlcllm: True - save_path: /path/to/save_for_mlcllm_awq_omni_w4/ diff --git a/configs/quantization/backend/sglang/awq_w4a16.yml b/configs/quantization/backend/sglang/awq_w4a16.yml deleted file mode 100644 index f01ce5ab0..000000000 --- a/configs/quantization/backend/sglang/awq_w4a16.yml +++ /dev/null @@ -1,42 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". 
- bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: True - granularity: per_group - group_size: 128 - need_pack: True - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -save: - save_sgl: True - save_path: /path/to/save_for_sgl_awq_w4/ diff --git a/configs/quantization/backend/sglang/awq_w8a8.yml b/configs/quantization/backend/sglang/awq_w8a8.yml deleted file mode 100644 index 392c426b7..000000000 --- a/configs/quantization/backend/sglang/awq_w8a8.yml +++ /dev/null @@ -1,45 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -save: - save_sgl: True - save_path: /path/to/save_for_sgl_awq_w8a8/ diff --git a/configs/quantization/backend/sglang/fp8/awq_fp8.yml b/configs/quantization/backend/sglang/fp8/awq_fp8.yml deleted file mode 100644 index b2f5396b9..000000000 --- a/configs/quantization/backend/sglang/fp8/awq_fp8.yml +++ /dev/null @@ -1,50 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - quant_type: float-quant - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_channel - use_qtorch: True - act: - quant_type: float-quant - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_token - use_qtorch: True - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -save: - save_sgl: True - save_path: /path/to/save_for_sgl_awq_fp8/ diff --git a/configs/quantization/backend/sglang/fp8/awq_fp8_static.yml b/configs/quantization/backend/sglang/fp8/awq_fp8_static.yml deleted file mode 100644 index 6c86e55a7..000000000 --- a/configs/quantization/backend/sglang/fp8/awq_fp8_static.yml +++ /dev/null @@ -1,51 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". 
- # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - quant_type: float-quant - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_tensor - use_qtorch: True - act: - quant_type: float-quant - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_tensor - use_qtorch: True - static: True - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -save: - save_sgl: True - save_path: /path/to/save_for_sgl_awq_fp8/ diff --git a/configs/quantization/backend/sglang/fp8/gptq_fp8.yml b/configs/quantization/backend/sglang/fp8/gptq_fp8.yml deleted file mode 100644 index 85b4bde2b..000000000 --- a/configs/quantization/backend/sglang/fp8/gptq_fp8.yml +++ /dev/null @@ -1,52 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: wikitext2 - download: False - n_samples: 128 - path: calib data path - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: GPTQ - weight: - quant_type: float-quant - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_channel - use_qtorch: True - act: - quant_type: float-quant - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_token - use_qtorch: True - special: - actorder: True - static_groups: False - percdamp: 0.01 - blocksize: 128 - true_sequential: True - quant_out: True -save: - save_sgl: True - save_path: /path/to/save_for_sgl_gptq_fp8/ diff --git a/configs/quantization/backend/sglang/fp8/rtn_fp8.yml b/configs/quantization/backend/sglang/fp8/rtn_fp8.yml deleted file mode 100644 index 849973feb..000000000 --- a/configs/quantization/backend/sglang/fp8/rtn_fp8.yml +++ /dev/null @@ -1,34 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". 
- bs: 1 - inference_per_block: False -quant: - method: RTN - weight: - quant_type: float-quant - bit: e4m3 - symmetric: True - granularity: per_channel - use_qtorch: True - act: - quant_type: float-quant - bit: e4m3 - symmetric: True - granularity: per_token - use_qtorch: True -save: - save_sgl: True - save_path: /path/to/save_for_sgl_rtn_fp8/ diff --git a/configs/quantization/backend/sglang/fp8/smoothquant_fp8.yml b/configs/quantization/backend/sglang/fp8/smoothquant_fp8.yml deleted file mode 100644 index e0caa7aee..000000000 --- a/configs/quantization/backend/sglang/fp8/smoothquant_fp8.yml +++ /dev/null @@ -1,41 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 512 - bs: 1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - bs: 1 - seq_len: 2048 -quant: - method: SmoothQuant - weight: - quant_type: float-quant - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_channel - use_qtorch: True - act: - quant_type: float-quant - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_token - use_qtorch: True -save: - save_sgl: True - save_path: /path/to/save_for_sgl_smooth_fp8/ diff --git a/configs/quantization/backend/sglang/gptq_w4a16.yml b/configs/quantization/backend/sglang/gptq_w4a16.yml deleted file mode 100644 index f33e95df7..000000000 --- a/configs/quantization/backend/sglang/gptq_w4a16.yml +++ /dev/null @@ -1,41 +0,0 @@ -base: - seed: &seed 0 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: wikitext2 - download: False - n_samples: 128 - path: calib data path - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - bs: 1 - seq_len: 2048 - inference_per_block: False -quant: - method: GPTQ - weight: - bit: 4 - symmetric: True - granularity: per_group - group_size: 128 - need_pack: True - special: - actorder: True - static_groups: True - percdamp: 0.01 - blocksize: 128 - true_sequential: True - quant_out: True -save: - save_sgl: True - save_path: /path/to/save_for_sgl_gptq_w4/ diff --git a/configs/quantization/backend/sglang/rtn_w4a16.yml b/configs/quantization/backend/sglang/rtn_w4a16.yml deleted file mode 100644 index b003b2284..000000000 --- a/configs/quantization/backend/sglang/rtn_w4a16.yml +++ /dev/null @@ -1,25 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -quant: - method: RTN - weight: - bit: 4 - symmetric: True - granularity: per_group - group_size: 128 - need_pack: True -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - bs: 1 - seq_len: 2048 - inference_per_block: False -save: - save_sgl: True - save_path: /path/to/save_for_sgl_rtn/ diff --git a/configs/quantization/backend/sglang/rtn_w8a16.yml b/configs/quantization/backend/sglang/rtn_w8a16.yml deleted file mode 100644 index c53748a75..000000000 --- a/configs/quantization/backend/sglang/rtn_w8a16.yml +++ /dev/null @@ -1,25 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -quant: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_group - group_size: 128 - need_pack: True -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - bs: 1 - 
seq_len: 2048 - inference_per_block: False -save: - save_sgl: True - save_path: /path/to/save_for_sgl_rtn_w8a16/ diff --git a/configs/quantization/backend/sglang/rtn_w8a8.yml b/configs/quantization/backend/sglang/rtn_w8a8.yml deleted file mode 100644 index 1b163d341..000000000 --- a/configs/quantization/backend/sglang/rtn_w8a8.yml +++ /dev/null @@ -1,28 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -quant: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - bs: 1 - seq_len: 2048 - inference_per_block: False -save: - save_sgl: True - save_path: /path/to/save_for_sgl_rtn_w8a8/ diff --git a/configs/quantization/backend/sglang/smoothquant_w8a8.yml b/configs/quantization/backend/sglang/smoothquant_w8a8.yml deleted file mode 100644 index 78c8409d1..000000000 --- a/configs/quantization/backend/sglang/smoothquant_w8a8.yml +++ /dev/null @@ -1,35 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 512 - bs: 1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - bs: 1 - seq_len: 2048 -quant: - method: SmoothQuant - weight: - bit: 8 - symmetric: True - granularity: per_channel - act: - bit: 8 - symmetric: True - granularity: per_token -save: - save_sgl: True - save_path: /path/to/save_for_sgl_smooth_w8a8/ diff --git a/configs/quantization/backend/sglang/w4a16_combin/step_1_awq.yml b/configs/quantization/backend/sglang/w4a16_combin/step_1_awq.yml deleted file mode 100644 index e3f147c0c..000000000 --- a/configs/quantization/backend/sglang/w4a16_combin/step_1_awq.yml +++ /dev/null @@ -1,46 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: True - granularity: per_group - group_size: 128 - calib_algo: minmax - special: - trans: True - trans_version: v1 - weight_clip: False - # For weight-only AWQ+Omni mode, there's no need to save scale factors; - # Only the AWQ-transformed model needs to be saved. - save_scale: False - save_clip: False -save: - # Save the AWQ-transformed model. 
- save_trans: True - save_path: /path/to/save_awq_trans/ diff --git a/configs/quantization/backend/sglang/w4a16_combin/step_2_omniq.yml b/configs/quantization/backend/sglang/w4a16_combin/step_2_omniq.yml deleted file mode 100644 index 5c25cf59f..000000000 --- a/configs/quantization/backend/sglang/w4a16_combin/step_2_omniq.yml +++ /dev/null @@ -1,56 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - # Load AWQ-transformed model - path: /path/to/save_awq_trans/transformed_model - torch_dtype: auto -calib: - name: wikitext2 - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: OmniQuant - weight: - bit: 4 - symmetric: True - granularity: per_group - group_size: 128 - calib_algo: learnable - ste: True - need_pack: True - special: - aug_loss: False - lwc: True - let: False - lwc_lr: 0.01 - let_lr: 0.005 - use_shift: False - alpha: 0.5 - deactive_amp: True - epochs: 5 - wd: 0 - # Use AWQ's search clip factors to initialize OmniQuant's clip factors, - # Then refine them through learning (LWC). - # Only the version v2 clipping method supports LWC. - # This process is automatically handled in OmniQuant's code. - search_clip_init: True - quant_out: True -save: - save_sgl: True - save_path: /path/to/save_for_sgl_awq_omni_w4/ diff --git a/configs/quantization/backend/sglang/w8a8_combin/step_1_quarot.yml b/configs/quantization/backend/sglang/w8a8_combin/step_1_quarot.yml deleted file mode 100644 index 0aeb40a0c..000000000 --- a/configs/quantization/backend/sglang/w8a8_combin/step_1_quarot.yml +++ /dev/null @@ -1,39 +0,0 @@ -base: - seed: &seed 0 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: /home/gushiqiao/nvme/nvme0/yongyang/llm_datasets/llmc/eval/wikitext2 - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Quarot - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - qmax_to_tensor: True - calib_algo: minmax - act: - bit: 8 - symmetric: True - granularity: per_token - qmax_to_tensor: True - special: - rotate_mode: hadamard - fp32_had: True - online_rotate: False -save: - # Save the Quarot-transformed model. 
- save_trans: True - save_path: /path/to/save_quarot_trans_for_gptq/ diff --git a/configs/quantization/backend/sglang/w8a8_combin/step_2_gptq.yml b/configs/quantization/backend/sglang/w8a8_combin/step_2_gptq.yml deleted file mode 100644 index 26be68118..000000000 --- a/configs/quantization/backend/sglang/w8a8_combin/step_2_gptq.yml +++ /dev/null @@ -1,53 +0,0 @@ -base: - seed: &seed 0 -model: - type: model_type - # Load Quarot-transformed model - path: /path/to/save_quarot_trans_for_gptq/transformed_model - torch_dtype: auto -calib: - name: wikitext2 - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: GPTQ - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - qmax_to_tensor: True - calib_algo: minmax - act: - bit: 8 - symmetric: True - granularity: per_token - qmax_to_tensor: True - calib_algo: minmax - special: - actorder: True - static_groups: True - percdamp: 0.01 - blocksize: 128 - true_sequential: True - online_rotate: False - fp32_had: True - quant_out: True -save: - save_sgl: True - save_path: /path/to/save_for_sgl_awq_omni_w4/ diff --git a/configs/quantization/backend/trtllm/awq_w4a16.yml b/configs/quantization/backend/trtllm/awq_w4a16.yml deleted file mode 100644 index 65cb6ceab..000000000 --- a/configs/quantization/backend/trtllm/awq_w4a16.yml +++ /dev/null @@ -1,36 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: pileval_awq - seed: *seed -eval: - eval_pos: [] - name: wikitext2 - download: False - path: eval data path - bs: 1 - seq_len: 2048 -quant: - method: Awq - weight: - bit: 4 - symmetric: True - granularity: per_group - group_size: 128 -save: - save_trans: False - save_trtllm: True - trtllm_cfg: - tp_size: 1 - pp_size: 1 - save_path: ./save diff --git a/configs/quantization/backend/trtllm/smoothquant_w8a8.yml b/configs/quantization/backend/trtllm/smoothquant_w8a8.yml deleted file mode 100644 index a6ff9c645..000000000 --- a/configs/quantization/backend/trtllm/smoothquant_w8a8.yml +++ /dev/null @@ -1,35 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llama - path: model path - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 512 - bs: 1 - seq_len: 512 - preproc: pileval_smooth - seed: *seed -eval: - eval_pos: [] - name: wikitext2 - download: False - path: eval data path - bs: 1 - seq_len: 2048 -quant: - method: SmoothQuant - weight: - bit: 8 - symmetric: True - granularity: per_channel - act: - bit: 8 - symmetric: True - granularity: per_token -save: - save_trans: True - save_path: ./save diff --git a/configs/quantization/backend/vllm/awq_w4a16.yml b/configs/quantization/backend/vllm/awq_w4a16.yml deleted file mode 100644 index fd6800efa..000000000 --- a/configs/quantization/backend/vllm/awq_w4a16.yml +++ /dev/null @@ -1,42 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - 
n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: True - granularity: per_group - group_size: 128 - need_pack: True - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -save: - save_vllm: True - save_path: /path/to/save_for_vllm_awq_w4/ diff --git a/configs/quantization/backend/vllm/awq_w8a8.yml b/configs/quantization/backend/vllm/awq_w8a8.yml deleted file mode 100644 index c23dfbfb4..000000000 --- a/configs/quantization/backend/vllm/awq_w8a8.yml +++ /dev/null @@ -1,45 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -save: - save_vllm: True - save_path: /path/to/save_for_vllm_awq_w8a8/ diff --git a/configs/quantization/backend/vllm/fp8/awq_fp8.yml b/configs/quantization/backend/vllm/fp8/awq_fp8.yml deleted file mode 100644 index 3a282259f..000000000 --- a/configs/quantization/backend/vllm/fp8/awq_fp8.yml +++ /dev/null @@ -1,50 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". 
- bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - quant_type: float-quant - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_channel - use_qtorch: True - act: - quant_type: float-quant - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_token - use_qtorch: True - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -save: - save_vllm: True - save_path: /path/to/save_for_vllm_awq_fp8/ diff --git a/configs/quantization/backend/vllm/fp8/awq_fp8_static.yml b/configs/quantization/backend/vllm/fp8/awq_fp8_static.yml deleted file mode 100644 index c45425079..000000000 --- a/configs/quantization/backend/vllm/fp8/awq_fp8_static.yml +++ /dev/null @@ -1,51 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - quant_type: float-quant - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_tensor - use_qtorch: True - act: - quant_type: float-quant - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_tensor - use_qtorch: True - static: True - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -save: - save_vllm: True - save_path: /path/to/save_for_vllm_awq_fp8/ diff --git a/configs/quantization/backend/vllm/fp8/gptq_fp8.yml b/configs/quantization/backend/vllm/fp8/gptq_fp8.yml deleted file mode 100644 index 905be88af..000000000 --- a/configs/quantization/backend/vllm/fp8/gptq_fp8.yml +++ /dev/null @@ -1,52 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: wikitext2 - download: False - n_samples: 128 - path: calib data path - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". 
- bs: 1 - inference_per_block: False -quant: - method: GPTQ - weight: - quant_type: float-quant - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_channel - use_qtorch: True - act: - quant_type: float-quant - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_token - use_qtorch: True - special: - actorder: True - static_groups: False - percdamp: 0.01 - blocksize: 128 - true_sequential: True - quant_out: True -save: - save_vllm: True - save_path: /path/to/save_for_vllm_gptq_fp8/ diff --git a/configs/quantization/backend/vllm/fp8/rtn_fp8.yml b/configs/quantization/backend/vllm/fp8/rtn_fp8.yml deleted file mode 100644 index f06f492ec..000000000 --- a/configs/quantization/backend/vllm/fp8/rtn_fp8.yml +++ /dev/null @@ -1,34 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: RTN - weight: - quant_type: float-quant - bit: e4m3 - symmetric: True - granularity: per_channel - use_qtorch: True - act: - quant_type: float-quant - bit: e4m3 - symmetric: True - granularity: per_token - use_qtorch: True -save: - save_vllm: True - save_path: /path/to/save_for_vllm_rtn_fp8/ diff --git a/configs/quantization/backend/vllm/fp8/smoothquant_fp8.yml b/configs/quantization/backend/vllm/fp8/smoothquant_fp8.yml deleted file mode 100644 index 1c97ce110..000000000 --- a/configs/quantization/backend/vllm/fp8/smoothquant_fp8.yml +++ /dev/null @@ -1,41 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 512 - bs: 1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - bs: 1 - seq_len: 2048 -quant: - method: SmoothQuant - weight: - quant_type: float-quant - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_channel - use_qtorch: True - act: - quant_type: float-quant - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_token - use_qtorch: True -save: - save_vllm: True - save_path: /path/to/save_for_vllm_smooth_fp8/ diff --git a/configs/quantization/backend/vllm/gptq_w4a16.yml b/configs/quantization/backend/vllm/gptq_w4a16.yml deleted file mode 100644 index 3aadf5257..000000000 --- a/configs/quantization/backend/vllm/gptq_w4a16.yml +++ /dev/null @@ -1,41 +0,0 @@ -base: - seed: &seed 0 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: wikitext2 - download: False - n_samples: 128 - path: calib data path - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - bs: 1 - seq_len: 2048 - inference_per_block: False -quant: - method: GPTQ - weight: - bit: 4 - symmetric: True - granularity: per_group - group_size: 128 - need_pack: True - special: - actorder: True - static_groups: True - percdamp: 0.01 - blocksize: 128 - true_sequential: True - quant_out: True -save: - save_vllm: True - save_path: /path/to/save_for_vllm_gptq_w4/ diff --git 
a/configs/quantization/backend/vllm/rtn_w4a16.yml b/configs/quantization/backend/vllm/rtn_w4a16.yml deleted file mode 100644 index 04680bc28..000000000 --- a/configs/quantization/backend/vllm/rtn_w4a16.yml +++ /dev/null @@ -1,25 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -quant: - method: RTN - weight: - bit: 4 - symmetric: True - granularity: per_group - group_size: 128 - need_pack: True -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - bs: 1 - seq_len: 2048 - inference_per_block: False -save: - save_vllm: True - save_path: /path/to/save_for_vllm_rtn/ diff --git a/configs/quantization/backend/vllm/rtn_w4a16_dsv3.yml b/configs/quantization/backend/vllm/rtn_w4a16_dsv3.yml deleted file mode 100644 index 210264682..000000000 --- a/configs/quantization/backend/vllm/rtn_w4a16_dsv3.yml +++ /dev/null @@ -1,18 +0,0 @@ -base: - seed: &seed 42 -model: - type: DeepseekV3 - path: model_path - tokenizer_mode: fast - torch_dtype: torch.float8_e4m3fn -quant: - method: RTN - weight: - bit: 4 - symmetric: True - granularity: per_group - group_size: 128 - need_pack: True -save: - save_vllm: True - save_path: /path/to/save_for_vllm_rtn/ diff --git a/configs/quantization/backend/vllm/rtn_w8a16.yml b/configs/quantization/backend/vllm/rtn_w8a16.yml deleted file mode 100644 index 4bf117250..000000000 --- a/configs/quantization/backend/vllm/rtn_w8a16.yml +++ /dev/null @@ -1,25 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -quant: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_group - group_size: 128 - need_pack: True -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - bs: 1 - seq_len: 2048 - inference_per_block: False -save: - save_vllm: True - save_path: /path/to/save_for_vllm_rtn_w8a16/ diff --git a/configs/quantization/backend/vllm/rtn_w8a8.yml b/configs/quantization/backend/vllm/rtn_w8a8.yml deleted file mode 100644 index c88380cac..000000000 --- a/configs/quantization/backend/vllm/rtn_w8a8.yml +++ /dev/null @@ -1,28 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -quant: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - bs: 1 - seq_len: 2048 - inference_per_block: False -save: - save_vllm: True - save_path: /path/to/save_for_vllm_rtn_w8a8/ diff --git a/configs/quantization/backend/vllm/smoothquant_w8a8.yml b/configs/quantization/backend/vllm/smoothquant_w8a8.yml deleted file mode 100644 index 83ecfaea6..000000000 --- a/configs/quantization/backend/vllm/smoothquant_w8a8.yml +++ /dev/null @@ -1,35 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 512 - bs: 1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - bs: 1 - seq_len: 2048 -quant: - method: SmoothQuant - weight: - bit: 8 - symmetric: True - granularity: per_channel - act: - bit: 8 - symmetric: True - granularity: per_token -save: - save_vllm: True - save_path: /path/to/save_for_vllm_smooth_w8a8/ diff --git a/configs/quantization/backend/vllm/tesseraq_w4a16.yml 
b/configs/quantization/backend/vllm/tesseraq_w4a16.yml deleted file mode 100644 index 30ec3c234..000000000 --- a/configs/quantization/backend/vllm/tesseraq_w4a16.yml +++ /dev/null @@ -1,58 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llama - path: meta-llama/Meta-Llama-3.1-70B - torch_dtype: auto -calib: - name: c4 - download: False - n_samples: 256 - path: ../cache/data/calib/c4 - bs: 1 - seq_len: 2048 - preproc: c4_gptq - seed: *seed -eval: - eval_pos: [nahh] # - name: [wikitext2, c4] - download: False - path: ../cache/data/eval - bs: 10 - seq_len: 2048 - inference_per_block: True - tasks: piqa,arc_easy,arc_challenge,hellaswag,winogrande -quant: - method: GPTBRECQ - weight: - bit: 4 - symmetric: True - granularity: per_channel - group_size: -1 - int_range: [-8, 7] - pack_mode: vllm_pack - calib_algo: learnable - special: - lr: 0.0005 - iterations: 250 - wd: 0.0 - batch_size: 2 - deactive_amp: False - aug_loss: False - optimize_scale: True - scale_lr: 0.0005 - thresholds: [0.75, 0.5, 0.375, 0.25, 0.125, 0.09, 0.06, 0.04, 0.02, 0.005] - weight_clip: True - load_transform: True - reduce_memory: True - clip_version: v2 - scale_path: ../cache/activations/L31_70b/awq_w4 - clip_path: ../cache/activations/L31_70b/awq_w4 - quant_out: True -save: - save_fp: False - save_trans: False - save_lightllm: False - save_autogptq: False - save_vllm: True - save_path: ../cache/ckpt/gptbrecq_w4_L31_70b/ diff --git a/configs/quantization/backend/vllm/w4a16_combin/step_1_awq.yml b/configs/quantization/backend/vllm/w4a16_combin/step_1_awq.yml deleted file mode 100644 index e3f147c0c..000000000 --- a/configs/quantization/backend/vllm/w4a16_combin/step_1_awq.yml +++ /dev/null @@ -1,46 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: True - granularity: per_group - group_size: 128 - calib_algo: minmax - special: - trans: True - trans_version: v1 - weight_clip: False - # For weight-only AWQ+Omni mode, there's no need to save scale factors; - # Only the AWQ-transformed model needs to be saved. - save_scale: False - save_clip: False -save: - # Save the AWQ-transformed model. 
- save_trans: True - save_path: /path/to/save_awq_trans/ diff --git a/configs/quantization/backend/vllm/w4a16_combin/step_2_omniq.yml b/configs/quantization/backend/vllm/w4a16_combin/step_2_omniq.yml deleted file mode 100644 index 466e1cfd1..000000000 --- a/configs/quantization/backend/vllm/w4a16_combin/step_2_omniq.yml +++ /dev/null @@ -1,56 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - # Load AWQ-transformed model - path: /path/to/save_awq_trans/transformed_model - torch_dtype: auto -calib: - name: wikitext2 - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: OmniQuant - weight: - bit: 4 - symmetric: True - granularity: per_group - group_size: 128 - calib_algo: learnable - ste: True - need_pack: True - special: - aug_loss: False - lwc: True - let: False - lwc_lr: 0.01 - let_lr: 0.005 - use_shift: False - alpha: 0.5 - deactive_amp: True - epochs: 5 - wd: 0 - # Use AWQ's search clip factors to initialize OmniQuant's clip factors, - # Then refine them through learning (LWC). - # Only the version v2 clipping method supports LWC. - # This process is automatically handled in OmniQuant's code. - search_clip_init: True - quant_out: True -save: - save_vllm: True - save_path: /path/to/save_for_vllm_awq_omni_w4/ diff --git a/configs/quantization/backend/vllm/w8a8_combin/step_1_quarot.yml b/configs/quantization/backend/vllm/w8a8_combin/step_1_quarot.yml deleted file mode 100644 index 0aeb40a0c..000000000 --- a/configs/quantization/backend/vllm/w8a8_combin/step_1_quarot.yml +++ /dev/null @@ -1,39 +0,0 @@ -base: - seed: &seed 0 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: /home/gushiqiao/nvme/nvme0/yongyang/llm_datasets/llmc/eval/wikitext2 - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Quarot - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - qmax_to_tensor: True - calib_algo: minmax - act: - bit: 8 - symmetric: True - granularity: per_token - qmax_to_tensor: True - special: - rotate_mode: hadamard - fp32_had: True - online_rotate: False -save: - # Save the Quarot-transformed model. 
- save_trans: True - save_path: /path/to/save_quarot_trans_for_gptq/ diff --git a/configs/quantization/backend/vllm/w8a8_combin/step_2_gptq.yml b/configs/quantization/backend/vllm/w8a8_combin/step_2_gptq.yml deleted file mode 100644 index ab857a0f8..000000000 --- a/configs/quantization/backend/vllm/w8a8_combin/step_2_gptq.yml +++ /dev/null @@ -1,53 +0,0 @@ -base: - seed: &seed 0 -model: - type: model_type - # Load Quarot-transformed model - path: /path/to/save_quarot_trans_for_gptq/transformed_model - torch_dtype: auto -calib: - name: wikitext2 - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: GPTQ - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - qmax_to_tensor: True - calib_algo: minmax - act: - bit: 8 - symmetric: True - granularity: per_token - qmax_to_tensor: True - calib_algo: minmax - special: - actorder: True - static_groups: True - percdamp: 0.01 - blocksize: 128 - true_sequential: True - online_rotate: False - fp32_had: True - quant_out: True -save: - save_vllm: True - save_path: /path/to/save_for_vllm_awq_omni_w4/ diff --git a/configs/quantization/combination/awq_comb_omni/w2a16g64/step_1_awq.yml b/configs/quantization/combination/awq_comb_omni/w2a16g64/step_1_awq.yml deleted file mode 100644 index abfcbc140..000000000 --- a/configs/quantization/combination/awq_comb_omni/w2a16g64/step_1_awq.yml +++ /dev/null @@ -1,47 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 2 - symmetric: False - granularity: per_group - group_size: 64 - calib_algo: minmax - special: - trans: True - trans_version: v1 - weight_clip: True - # For weight-only AWQ+Omni mode, there's no need to save scale factors; - # Only the AWQ-transformed model needs to be saved. - save_scale: False - save_clip: False -save: - # Save the AWQ-transformed model for omniquant. 
- save_trans: True - save_fake: False - save_path: /path/to/save_awq_trans/ diff --git a/configs/quantization/combination/awq_comb_omni/w2a16g64/step_2_omniq.yml b/configs/quantization/combination/awq_comb_omni/w2a16g64/step_2_omniq.yml deleted file mode 100644 index 328bf6946..000000000 --- a/configs/quantization/combination/awq_comb_omni/w2a16g64/step_2_omniq.yml +++ /dev/null @@ -1,56 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - # Load AWQ-transformed model - path: /path/to/save_awq_trans/transformed_model - torch_dtype: auto -calib: - name: wikitext2 - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: OmniQuant - weight: - bit: 2 - symmetric: False - granularity: per_group - group_size: 64 - calib_algo: learnable - ste: True - special: - aug_loss: True - lwc: True - let: False - lwc_lr: 0.01 - let_lr: 0.005 - use_shift: False - alpha: 0.5 - deactive_amp: True - epochs: 5 - wd: 0 - # Use AWQ's search clip factors to initialize OmniQuant's clip factors, - # Then refine them through learning (LWC). - # Only the version v2 clipping method supports LWC. - # This process is automatically handled in OmniQuant's code. - search_clip_init: True - quant_out: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/combination/awq_comb_omni/w3a16g128/step_1_awq.yml b/configs/quantization/combination/awq_comb_omni/w3a16g128/step_1_awq.yml deleted file mode 100644 index 6c0ab0da2..000000000 --- a/configs/quantization/combination/awq_comb_omni/w3a16g128/step_1_awq.yml +++ /dev/null @@ -1,47 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 3 - symmetric: False - granularity: per_group - group_size: 128 - calib_algo: minmax - special: - trans: True - trans_version: v1 - weight_clip: False - # For weight-only AWQ+Omni mode, there's no need to save scale factors; - # Only the AWQ-transformed model needs to be saved. - save_scale: False - save_clip: False -save: - # Save the AWQ-transformed model. 
- save_trans: True - save_fake: False - save_path: /path/to/save_awq_trans/ diff --git a/configs/quantization/combination/awq_comb_omni/w3a16g128/step_2_omniq.yml b/configs/quantization/combination/awq_comb_omni/w3a16g128/step_2_omniq.yml deleted file mode 100644 index 53b93041e..000000000 --- a/configs/quantization/combination/awq_comb_omni/w3a16g128/step_2_omniq.yml +++ /dev/null @@ -1,56 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - # Load AWQ-transformed model - path: /path/to/save_awq_trans/transformed_model - torch_dtype: auto -calib: - name: wikitext2 - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: OmniQuant - weight: - bit: 3 - symmetric: False - granularity: per_group - group_size: 128 - calib_algo: learnable - ste: True - special: - aug_loss: True - lwc: True - let: False - lwc_lr: 0.01 - let_lr: 0.005 - use_shift: False - alpha: 0.5 - deactive_amp: True - epochs: 5 - wd: 0 - # Use AWQ's search clip factors to initialize OmniQuant's clip factors, - # Then refine them through learning (LWC). - # Only the version v2 clipping method supports LWC. - # This process is automatically handled in OmniQuant's code. - search_clip_init: True - quant_out: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/combination/awq_comb_omni/w4a16g128/step_1_awq.yml b/configs/quantization/combination/awq_comb_omni/w4a16g128/step_1_awq.yml deleted file mode 100644 index 77116658b..000000000 --- a/configs/quantization/combination/awq_comb_omni/w4a16g128/step_1_awq.yml +++ /dev/null @@ -1,47 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - calib_algo: minmax - special: - trans: True - trans_version: v1 - weight_clip: False - # For weight-only AWQ+Omni mode, there's no need to save scale factors; - # Only the AWQ-transformed model needs to be saved. - save_scale: False - save_clip: False -save: - # Save the AWQ-transformed model. 
- save_trans: False - save_fake: False - save_path: /path/to/save_awq_trans/ diff --git a/configs/quantization/combination/awq_comb_omni/w4a16g128/step_2_omniq.yml b/configs/quantization/combination/awq_comb_omni/w4a16g128/step_2_omniq.yml deleted file mode 100644 index 3d7ba4810..000000000 --- a/configs/quantization/combination/awq_comb_omni/w4a16g128/step_2_omniq.yml +++ /dev/null @@ -1,56 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - # Load AWQ-transformed model - path: /path/to/save_awq_trans/transformed_model - torch_dtype: auto -calib: - name: wikitext2 - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: OmniQuant - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - calib_algo: learnable - ste: True - special: - aug_loss: True - lwc: True - let: False - lwc_lr: 0.01 - let_lr: 0.005 - use_shift: False - alpha: 0.5 - deactive_amp: True - epochs: 5 - wd: 0 - # Use AWQ's search clip factors to initialize OmniQuant's clip factors, - # Then refine them through learning (LWC). - # Only the version v2 clipping method supports LWC. - # This process is automatically handled in OmniQuant's code. - search_clip_init: True - quant_out: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/combination/awq_comb_omni/w6a6/step_1_awq.yml b/configs/quantization/combination/awq_comb_omni/w6a6/step_1_awq.yml deleted file mode 100644 index 083dc4ec0..000000000 --- a/configs/quantization/combination/awq_comb_omni/w6a6/step_1_awq.yml +++ /dev/null @@ -1,55 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 6 - symmetric: False - granularity: per_channel - group_size: -1 - calib_algo: learnable - act: - bit: 6 - symmetric: False - granularity: per_token - calib_algo: minmax - special: - trans: True - trans_version: v2 - weight_clip: True - # Only the version v2 clipping method supports saving clip factors. - # And the weight calibration algorithm must be set to "learnable". - clip_version: v2 - # Save scale and clip factors for OmniQuant's LWC and LET. 
- save_scale: True - scale_path: /path/to/scale - save_clip: True - clip_path: /path/to/clip -save: - save_trans: False - save_fake: False - save_path: ./save diff --git a/configs/quantization/combination/awq_comb_omni/w6a6/step_2_omniq.yml b/configs/quantization/combination/awq_comb_omni/w6a6/step_2_omniq.yml deleted file mode 100644 index 3af983dfa..000000000 --- a/configs/quantization/combination/awq_comb_omni/w6a6/step_2_omniq.yml +++ /dev/null @@ -1,64 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: wikitext2 - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: OmniQuant - weight: - bit: 6 - symmetric: False - granularity: per_channel - calib_algo: learnable - ste: True - act: - bit: 6 - symmetric: False - granularity: per_token - ste: True - special: - aug_loss: False - lwc: True - let: True - lwc_lr: 0.001 - let_lr: 0.001 - use_shift: False - alpha: 0.5 - deactive_amp: True - epochs: 5 - wd: 0 - # Use AWQ's search clip factors to initialize OmniQuant's clip factors, - # Then refine them through learning (LWC). - search_clip_init: True - load_clip: True - clip_path: /path/to/scale - # Use AWQ's search scale factors to initialize OmniQuant's scale factors, - # Then refine them through learning (LET). - search_scale_init: True - scale_path: /path/to/clip - quant_out: True -save: - save_trans: False - save_fake: False - save_path: ./save diff --git a/configs/quantization/combination/awq_comb_omni/w8a8/step_1_awq.yml b/configs/quantization/combination/awq_comb_omni/w8a8/step_1_awq.yml deleted file mode 100644 index 3c3b24380..000000000 --- a/configs/quantization/combination/awq_comb_omni/w8a8/step_1_awq.yml +++ /dev/null @@ -1,55 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 8 - symmetric: False - granularity: per_channel - group_size: -1 - calib_algo: learnable - act: - bit: 8 - symmetric: False - granularity: per_token - calib_algo: minmax - special: - trans: True - trans_version: v2 - weight_clip: True - # Only the version v2 clipping method supports saving clip factors. - # And the weight calibration algorithm must be set to "learnable". - clip_version: v2 - # Save scale and clip factors for OmniQuant's LWC and LET. 
- save_scale: True - scale_path: /path/to/scale - save_clip: True - clip_path: /path/to/clip -save: - save_trans: False - save_fake: False - save_path: ./save diff --git a/configs/quantization/combination/awq_comb_omni/w8a8/step_2_omniq.yml b/configs/quantization/combination/awq_comb_omni/w8a8/step_2_omniq.yml deleted file mode 100644 index 0efc8d83e..000000000 --- a/configs/quantization/combination/awq_comb_omni/w8a8/step_2_omniq.yml +++ /dev/null @@ -1,64 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: wikitext2 - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: OmniQuant - weight: - bit: 8 - symmetric: False - granularity: per_channel - calib_algo: learnable - ste: True - act: - bit: 8 - symmetric: False - granularity: per_token - ste: True - special: - aug_loss: False - lwc: True - let: True - lwc_lr: 0.001 - let_lr: 0.001 - use_shift: False - alpha: 0.5 - deactive_amp: True - epochs: 5 - wd: 0 - # Use AWQ's search clip factors to initialize OmniQuant's clip factors, - # Then refine them through learning (LWC). - search_clip_init: True - load_clip: True - clip_path: /path/to/scale - # Use AWQ's search scale factors to initialize OmniQuant's scale factors, - # Then refine them through learning (LET). - search_scale_init: True - scale_path: /path/to/clip - quant_out: True -save: - save_trans: False - save_fake: False - save_path: ./save diff --git a/configs/quantization/combination/quant_comb_token_pruning/rtn_w_a_vlm.yml b/configs/quantization/combination/quant_comb_token_pruning/rtn_w_a_vlm.yml deleted file mode 100644 index 3c7b536ac..000000000 --- a/configs/quantization/combination/quant_comb_token_pruning/rtn_w_a_vlm.yml +++ /dev/null @@ -1,45 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, fake_quant] - type: vqa - name: mme - download: False - path: MME dataset path - bs: 1 - inference_per_block: False -quant: - vision: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - language: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - token_reduction: - method: FastV - special: - pruning_loc: 3 - rate: 0.5 -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/combination/quarot_comb_gptq/w4a4/step_1_quarot.yml b/configs/quantization/combination/quarot_comb_gptq/w4a4/step_1_quarot.yml deleted file mode 100644 index dde704649..000000000 --- a/configs/quantization/combination/quarot_comb_gptq/w4a4/step_1_quarot.yml +++ /dev/null @@ -1,38 +0,0 @@ -base: - seed: &seed 0 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". 
- # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Quarot - weight: - bit: 4 - symmetric: False - granularity: per_channel - group_size: -1 - calib_algo: minmax - act: - bit: 4 - symmetric: False - granularity: per_token - special: - rotate_mode: hadamard - fp32_had: True - online_rotate: True -save: - # Save the Quarot-transformed model. - save_trans: True - save_fake: False - save_path: /path/to/save_quarot_trans_for_gptq/ diff --git a/configs/quantization/combination/quarot_comb_gptq/w4a4/step_2_gptq.yml b/configs/quantization/combination/quarot_comb_gptq/w4a4/step_2_gptq.yml deleted file mode 100644 index 7d12092be..000000000 --- a/configs/quantization/combination/quarot_comb_gptq/w4a4/step_2_gptq.yml +++ /dev/null @@ -1,52 +0,0 @@ -base: - seed: &seed 0 -model: - type: Llama - # Load Quarot-transformed model - path: /path/to/save_quarot_trans_for_gptq/transformed_model - torch_dtype: auto -calib: - name: wikitext2 - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: GPTQ - weight: - bit: 4 - symmetric: False - granularity: per_channel - group_size: -1 - calib_algo: mse - act: - bit: 4 - symmetric: False - granularity: per_token - calib_algo: minmax - special: - actorder: True - static_groups: True - percdamp: 0.01 - blocksize: 128 - true_sequential: True - online_rotate: True - fp32_had: True - quant_out: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/combination/quarot_comb_gptq/w8a8/step_1_quarot.yml b/configs/quantization/combination/quarot_comb_gptq/w8a8/step_1_quarot.yml deleted file mode 100644 index a601f82d4..000000000 --- a/configs/quantization/combination/quarot_comb_gptq/w8a8/step_1_quarot.yml +++ /dev/null @@ -1,38 +0,0 @@ -base: - seed: &seed 0 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Quarot - weight: - bit: 8 - symmetric: False - granularity: per_channel - group_size: -1 - calib_algo: minmax - act: - bit: 8 - symmetric: False - granularity: per_token - special: - rotate_mode: hadamard - fp32_had: True - online_rotate: True -save: - # Save the Quarot-transformed model. 
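For readers unfamiliar with what `rotate_mode: hadamard` / `online_rotate` in the Quarot steps refer to: multiplying activations by an orthogonal (Hadamard) rotation and folding its transpose into the weights leaves the layer output unchanged in exact arithmetic, while spreading activation outliers across channels so low-bit quantization loses less. A small numerical sketch of that equivalence (illustrative only, not the repo's implementation):

```python
# Sketch: a normalized Hadamard rotation H is orthogonal, so (x H)(W H)^T == x W^T.
import numpy as np
from scipy.linalg import hadamard

d = 64                                   # hidden size; must be a power of two here
rng = np.random.default_rng(0)
x = rng.standard_normal((4, d))          # activations (tokens x channels)
W = rng.standard_normal((128, d))        # weight of a linear layer (out x in)

H = hadamard(d) / np.sqrt(d)             # orthonormal: H @ H.T == I
x_rot = x @ H                            # rotate activations online
W_rot = W @ H                            # fold the rotation into the weights offline

print(np.allclose(x @ W.T, x_rot @ W_rot.T))   # True, up to float error
# The rotated tensors have flatter per-channel ranges, which is what makes
# low-bit per-channel / per-token quantization less lossy after rotation.
```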
- save_trans: True - save_fake: False - save_path: /path/to/save_quarot_trans_for_gptq/ diff --git a/configs/quantization/combination/quarot_comb_gptq/w8a8/step_2_gptq.yml b/configs/quantization/combination/quarot_comb_gptq/w8a8/step_2_gptq.yml deleted file mode 100644 index 2f12cbfe6..000000000 --- a/configs/quantization/combination/quarot_comb_gptq/w8a8/step_2_gptq.yml +++ /dev/null @@ -1,53 +0,0 @@ -base: - seed: &seed 0 -model: - type: model_type - # Load Quarot-transformed model - path: /path/to/save_quarot_trans_for_gptq/transformed_model - torch_dtype: auto -calib: - name: wikitext2 - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: GPTQ - weight: - bit: 8 - symmetric: False - granularity: per_channel - group_size: -1 - calib_algo: mse - act: - bit: 8 - symmetric: False - granularity: per_token - calib_algo: minmax - special: - actorder: True - static_groups: True - percdamp: 0.01 - blocksize: 128 - chunk_num: 4 - true_sequential: True - online_rotate: True - fp32_had: True - quant_out: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/deepseekv3/awq_w_only_dsv3.yml b/configs/quantization/deepseekv3/awq_w_only_dsv3.yml deleted file mode 100755 index 12ba228bd..000000000 --- a/configs/quantization/deepseekv3/awq_w_only_dsv3.yml +++ /dev/null @@ -1,33 +0,0 @@ -base: - seed: &seed 42 -model: - type: DeepseekV3 - path: Deepseekv3-fp8-path - tokenizer_mode: fast - torch_dtype: torch.float8_e4m3fn - block_wise_quant: True -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: pileval_awq - seed: *seed -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 64 - pack_version: gemm_pack - special: - trans: True - trans_version: v2 - weight_clip: True - save_mem: False -save: - save_autoawq: True - save_path: /path/to/save/ diff --git a/configs/quantization/deepseekv3/awq_w_only_dsv3_bf16.yml b/configs/quantization/deepseekv3/awq_w_only_dsv3_bf16.yml deleted file mode 100755 index 3bbee538a..000000000 --- a/configs/quantization/deepseekv3/awq_w_only_dsv3_bf16.yml +++ /dev/null @@ -1,32 +0,0 @@ -base: - seed: &seed 42 -model: - type: DeepseekV3 - path: Deepseekv3-bf16-path - tokenizer_mode: fast - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: pileval_awq - seed: *seed -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 64 - pack_version: gemm_pack - special: - trans: True - trans_version: v2 - weight_clip: True - save_mem: False -save: - save_autoawq: True - save_path: /path/to/save/ diff --git a/configs/quantization/deepseekv3/osplus_w_a_dsv3.yml b/configs/quantization/deepseekv3/osplus_w_a_dsv3.yml deleted file mode 100755 index cea88a552..000000000 --- a/configs/quantization/deepseekv3/osplus_w_a_dsv3.yml +++ /dev/null @@ -1,33 +0,0 @@ -base: - seed: &seed 42 -model: - type: DeepseekV3 - path: Deepseekv3-fp8-path - tokenizer_mode: fast - torch_dtype: torch.float8_e4m3fn - 
block_wise_quant: True -calib: - name: pileval - download: False - path: calib data path - n_samples: 1 - bs: 1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -quant: - method: OsPlus - weight: - bit: 8 - symmetric: True - granularity: per_channel - act: - bit: 8 - symmetric: True - granularity: per_token - special: - true_sequential: True - quant_out: True -save: - save_vllm: True - save_path: /path/to/save/ diff --git a/configs/quantization/deepseekv3/quarot_w_a_dsv3.yml b/configs/quantization/deepseekv3/quarot_w_a_dsv3.yml deleted file mode 100755 index 3710a85eb..000000000 --- a/configs/quantization/deepseekv3/quarot_w_a_dsv3.yml +++ /dev/null @@ -1,25 +0,0 @@ -base: - seed: &seed 42 -model: - type: DeepseekV3 - path: Deepseekv3-fp8-path - tokenizer_mode: fast - torch_dtype: torch.float8_e4m3fn - block_wise_quant: True -quant: - method: Quarot - weight: - bit: 8 - symmetric: False - granularity: per_channel - act: - bit: 8 - symmetric: False - granularity: per_token - special: - rotate_mode: hadamard - fp32_had: True - online_rotate: False -save: - save_vllm: True - save_path: /path/to/save/ diff --git a/configs/quantization/deepseekv3/rtn_w_a_dsv3.yml b/configs/quantization/deepseekv3/rtn_w_a_dsv3.yml deleted file mode 100755 index f8970c2a6..000000000 --- a/configs/quantization/deepseekv3/rtn_w_a_dsv3.yml +++ /dev/null @@ -1,22 +0,0 @@ -base: - seed: &seed 42 -model: - type: DeepseekV3 - path: Deepseekv3-fp8-path - tokenizer_mode: fast - torch_dtype: torch.float8_e4m3fn - block_wise_quant: True -quant: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token -save: - save_vllm: True - save_path: /path/to/save/ diff --git a/configs/quantization/deepseekv3/rtn_w_only_dsv3.yml b/configs/quantization/deepseekv3/rtn_w_only_dsv3.yml deleted file mode 100755 index 417edb4e2..000000000 --- a/configs/quantization/deepseekv3/rtn_w_only_dsv3.yml +++ /dev/null @@ -1,19 +0,0 @@ -base: - seed: &seed 42 -model: - type: DeepseekV3 - path: Deepseekv3-fp8-path - tokenizer_mode: fast - torch_dtype: torch.float8_e4m3fn - block_wise_quant: True -quant: - method: RTN - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 64 - pack_version: gemm_pack -save: - save_autoawq: True - save_path: /path/to/save/ diff --git a/configs/quantization/deepseekv3/smoothquant_w_a_dsv3.yml b/configs/quantization/deepseekv3/smoothquant_w_a_dsv3.yml deleted file mode 100755 index a25a9dcc0..000000000 --- a/configs/quantization/deepseekv3/smoothquant_w_a_dsv3.yml +++ /dev/null @@ -1,32 +0,0 @@ -base: - seed: &seed 42 -model: - type: DeepseekV3 - path: Deepseekv3-fp8-path - tokenizer_mode: fast - torch_dtype: torch.float8_e4m3fn - block_wise_quant: True -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -quant: - method: SmoothQuant - weight: - bit: 8 - symmetric: True - granularity: per_channel - act: - bit: 8 - symmetric: True - granularity: per_token - special: - alpha: 0.8 -save: - save_vllm: True - save_path: /path/to/save/ diff --git a/configs/quantization/methods/AdaDim/adadim_w_a.yml b/configs/quantization/methods/AdaDim/adadim_w_a.yml deleted file mode 100644 index fb4541eb7..000000000 --- a/configs/quantization/methods/AdaDim/adadim_w_a.yml +++ /dev/null @@ -1,41 +0,0 @@ -base: - seed: &seed 0 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: 
- name: c4 - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: c4_gptq - seed: *seed -eval: - eval_pos: [pretrain, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: AdaDim - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - quant_out: True -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/Awq/awq_w_a.yml b/configs/quantization/methods/Awq/awq_w_a.yml deleted file mode 100644 index ba280a2c7..000000000 --- a/configs/quantization/methods/Awq/awq_w_a.yml +++ /dev/null @@ -1,47 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: pileval_awq - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - special: - trans: True - # The options for "trans_version" include "v1" and "v2". - trans_version: v2 - weight_clip: True - clip_sym: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/Awq/awq_w_a_chat_data.yml b/configs/quantization/methods/Awq/awq_w_a_chat_data.yml deleted file mode 100644 index d0598b617..000000000 --- a/configs/quantization/methods/Awq/awq_w_a_chat_data.yml +++ /dev/null @@ -1,48 +0,0 @@ -base: - seed: &seed 42 -model: - type: chat_model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: ultrachat - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - padding: True - preproc: ultrachat_general - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - special: - trans: True - # The options for "trans_version" include "v1" and "v2". 
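The weight/act blocks in these configs all reduce to one fake-quantization primitive with different reduction axes: per_channel uses one scale per output channel, per_group one scale per group_size slice of the input dimension, and per_token one scale per activation row. A minimal sketch of that primitive covering the bit/symmetric/granularity/group_size fields (illustrative only; the repo's quantizer also handles calib_algo, ste, and more):

```python
# Sketch: integer fake quantization with per-row or per-group scales.
import numpy as np

def fake_quant(x, bit=4, symmetric=False, group_size=-1):
    """x: (rows, cols). group_size=-1 -> per-row (per_channel / per_token); else per_group."""
    rows, cols = x.shape
    g = cols if group_size == -1 else group_size
    xg = x.reshape(rows, cols // g, g)
    if symmetric:
        qmax = 2 ** (bit - 1) - 1
        scale = np.abs(xg).max(axis=-1, keepdims=True) / qmax
        deq = np.clip(np.round(xg / scale), -qmax - 1, qmax) * scale
    else:
        qmax = 2 ** bit - 1
        xmin = xg.min(axis=-1, keepdims=True)
        scale = (xg.max(axis=-1, keepdims=True) - xmin) / qmax
        zp = np.round(-xmin / scale)
        q = np.clip(np.round(xg / scale) + zp, 0, qmax)
        deq = (q - zp) * scale
    return deq.reshape(rows, cols)

w = np.random.randn(8, 256)
print(np.abs(w - fake_quant(w, bit=4, symmetric=False, group_size=128)).mean())
```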
- trans_version: v2 - weight_clip: True - clip_sym: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/Awq/awq_w_a_mix_bits.yml b/configs/quantization/methods/Awq/awq_w_a_mix_bits.yml deleted file mode 100644 index 3affac0f4..000000000 --- a/configs/quantization/methods/Awq/awq_w_a_mix_bits.yml +++ /dev/null @@ -1,58 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: pileval_awq - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_channel - act: - bit: 4 - symmetric: False - granularity: per_token - mix_bits: - setting_0: - layer_name: [down_proj] - do_quant: True - weight: - bit: 8 - symmetric: False - granularity: per_channel - act: - bit: 8 - symmetric: False - granularity: per_token - special: - trans: True - # The options for "trans_version" include "v1" and "v2". - trans_version: v2 - weight_clip: True - clip_sym: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/Awq/awq_w_only.yml b/configs/quantization/methods/Awq/awq_w_only.yml deleted file mode 100644 index 3e293f5c0..000000000 --- a/configs/quantization/methods/Awq/awq_w_only.yml +++ /dev/null @@ -1,45 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: pileval_awq - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: True - granularity: per_group - group_size: 128 - special: - trans: True - # The options for "trans_version" include "v1" and "v2". - # But their results don't differ significantly. - trans_version: v2 - weight_clip: True - # For 2-bit quantization, setting "clip_sym: False" will yield better results. - clip_sym: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/Awq/awq_w_only_custom_alm_data_padding.yml b/configs/quantization/methods/Awq/awq_w_only_custom_alm_data_padding.yml deleted file mode 100644 index 30a721386..000000000 --- a/configs/quantization/methods/Awq/awq_w_only_custom_alm_data_padding.yml +++ /dev/null @@ -1,47 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: custom_mm - download: False - path: calib data path - apply_chat_template: True - add_answer: True # Defalut is False. If set it to Ture, calib data will add answers. 
- n_samples: 8 - bs: -1 - seq_len: 512 - padding: True - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - special: - trans: True - # The options for "trans_version" include "v1" and "v2". - # But their results don't differ significantly. - trans_version: v2 - weight_clip: True - # For 2-bit quantization, setting "clip_sym: False" will yield better results. - clip_sym: False -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/Awq/awq_w_only_custom_avlm_data_padding.yml b/configs/quantization/methods/Awq/awq_w_only_custom_avlm_data_padding.yml deleted file mode 100644 index df4208938..000000000 --- a/configs/quantization/methods/Awq/awq_w_only_custom_avlm_data_padding.yml +++ /dev/null @@ -1,48 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: fast - torch_dtype: auto - use_cpu_to_save_cuda_mem_for_catcher: False -calib: - name: custom_mm - download: False - path: calib data path - apply_chat_template: True - add_answer: True # Defalut is False. If set it to Ture, calib data will add answers. - n_samples: 8 - bs: -1 - seq_len: 512 - padding: True - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - special: - trans: True - # The options for "trans_version" include "v1" and "v2". - # But their results don't differ significantly. - trans_version: v2 - weight_clip: False - # For 2-bit quantization, setting "clip_sym: False" will yield better results. - clip_sym: False -save: - save_trans: False - save_fake: False - save_path: ./save_vlm_glm4v/ diff --git a/configs/quantization/methods/Awq/awq_w_only_custom_data.yml b/configs/quantization/methods/Awq/awq_w_only_custom_data.yml deleted file mode 100644 index 90fad3a03..000000000 --- a/configs/quantization/methods/Awq/awq_w_only_custom_data.yml +++ /dev/null @@ -1,63 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: custom_txt - download: False - path: calib data path - apply_chat_template: True - n_samples: 128 - seq_len: 512 - bs: -1 - preproc: random_truncate_txt - seed: *seed -eval: - - eval_pos: [pretrain, transformed, fake_quant] - name: custom_gen - type: generate_only - max_new_tokens: 32 - bs: 1 - download: False - path: /data/yongyang/datasets/general_custom_data - apply_chat_template: True - inference_per_block: False - - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - type: token_acc - download: False - path: /data/yongyang/datasets/llmc/eval/wikitext2 - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". 
- # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False - - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: /data/yongyang/datasets/llmc/eval/wikitext2 - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - special: - trans: True - # The options for "trans_version" include "v1" and "v2". - trans_version: v2 - weight_clip: True - clip_sym: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/Awq/awq_w_only_custom_data_debug.yml b/configs/quantization/methods/Awq/awq_w_only_custom_data_debug.yml deleted file mode 100644 index 779efa6a8..000000000 --- a/configs/quantization/methods/Awq/awq_w_only_custom_data_debug.yml +++ /dev/null @@ -1,121 +0,0 @@ -base: - seed: &seed 42 -model: - # type: Qwen2 - # path: /data/yongyang/models/qwen25/Qwen2.5-0.5B-Instruct - type: Qwen2VL - path: /data/yongyang/models/Qwen2-VL-2B-Instruct - # type: Llava - # path: /data/yongyang/models/llava-1.5-7b-hf - # type: InternVL2 - # path: /data/yongyang/models/InternVL2-2B - # type: Qwen2Audio - # path: /data/yongyang/models/Qwen2-Audio-7B-Instruct - # type: InternOmni - # path: /data/yongyang/models/InternOmni - # type: Llama - # path: /data/yongyang/models/Meta-Llama-3.1-8B-Instruct - # type: InternLM2 - # path: /data/yongyang/models/internlm2-chat-1_8b - # type: DeepseekV2 - # path: /data/yongyang/models/DeepSeek-V2-Lite-Chat - tokenizer_mode: fast - torch_dtype: auto -# calib: -# name: pileval -# download: False -# path: /data/yongyang/datasets/llmc/calib/pileval -# n_samples: 2 -# bs: -1 -# seq_len: 512 -# preproc: txt_general_preproc -# seed: *seed -# calib: -# name: custom_txt -# download: False -# path: /data/yongyang/datasets/general_custom_data -# apply_chat_template: True -# n_samples: 8 -# bs: -1 -# padding: True -# seed: *seed -# calib: -# name: custom_txt -# download: False -# path: /data/yongyang/datasets/general_custom_data -# apply_chat_template: True -# n_samples: 8 -# seq_len: 3 -# bs: -1 -# preproc: random_truncate_txt -# seed: *seed -calib: - name: custom_mm - download: False - path: /data/yongyang/datasets/general_custom_data - apply_chat_template: True - add_answer: True # Defalut is False. If set it to Ture, calib data will add answers. - n_samples: 8 - bs: -1 - seq_len: 512 - padding: True - seed: *seed -eval: - - eval_pos: [pretrain, transformed, fake_quant] - name: custom_gen - type: generate_only - max_new_tokens: 32 - bs: 1 - download: False - path: /data/yongyang/datasets/general_custom_data - apply_chat_template: True - inference_per_block: False - - eval_pos: [pretrain, transformed, fake_quant] - name: custom_ppl - type: ppl - download: False - path: /data/yongyang/datasets/general_custom_data - apply_chat_template: False - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". 
- bs: 1 - inference_per_block: False - - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - type: token_acc - download: False - path: /data/yongyang/datasets/llmc/eval/wikitext2 - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False - - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: /data/yongyang/datasets/llmc/eval/wikitext2 - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - special: - trans: True - # The options for "trans_version" include "v1" and "v2". - # But their results don't differ significantly. - trans_version: v2 - weight_clip: False - # For 2-bit quantization, setting "clip_sym: False" will yield better results. - clip_sym: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/Awq/awq_w_only_custom_data_padding.yml b/configs/quantization/methods/Awq/awq_w_only_custom_data_padding.yml deleted file mode 100644 index 71576ae27..000000000 --- a/configs/quantization/methods/Awq/awq_w_only_custom_data_padding.yml +++ /dev/null @@ -1,45 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: custom_txt - download: False - path: calib data path - apply_chat_template: True - n_samples: 8 - bs: -1 - padding: True - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - special: - trans: True - # The options for "trans_version" include "v1" and "v2". - # But their results don't differ significantly. - trans_version: v2 - weight_clip: False - # For 2-bit quantization, setting "clip_sym: False" will yield better results. 
- clip_sym: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/Awq/awq_w_only_custom_eval.yml b/configs/quantization/methods/Awq/awq_w_only_custom_eval.yml deleted file mode 100644 index 4eb40ddef..000000000 --- a/configs/quantization/methods/Awq/awq_w_only_custom_eval.yml +++ /dev/null @@ -1,45 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: pileval_awq - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: custom_gen - type: generate_only - max_new_tokens: 32 - bs: 1 - download: False - path: /data/yongyang/datasets/general_custom_data - apply_chat_template: True - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - special: - trans: True - # The options for "trans_version" include "v1" and "v2". - # But their results don't differ significantly. - trans_version: v2 - weight_clip: True - # For 2-bit quantization, setting "clip_sym: False" will yield better results. - clip_sym: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/Awq/awq_w_only_custom_vlm_data_padding.yml b/configs/quantization/methods/Awq/awq_w_only_custom_vlm_data_padding.yml deleted file mode 100644 index 30a721386..000000000 --- a/configs/quantization/methods/Awq/awq_w_only_custom_vlm_data_padding.yml +++ /dev/null @@ -1,47 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: custom_mm - download: False - path: calib data path - apply_chat_template: True - add_answer: True # Defalut is False. If set it to Ture, calib data will add answers. - n_samples: 8 - bs: -1 - seq_len: 512 - padding: True - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - special: - trans: True - # The options for "trans_version" include "v1" and "v2". - # But their results don't differ significantly. - trans_version: v2 - weight_clip: True - # For 2-bit quantization, setting "clip_sym: False" will yield better results. 
- clip_sym: False -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/Awq/awq_w_only_mix_bits_1.yml b/configs/quantization/methods/Awq/awq_w_only_mix_bits_1.yml deleted file mode 100644 index e92766b04..000000000 --- a/configs/quantization/methods/Awq/awq_w_only_mix_bits_1.yml +++ /dev/null @@ -1,52 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: pileval_awq - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - mix_bits: - setting_0: - layer_name: [down_proj] - do_quant: True - weight: - bit: 8 - symmetric: False - granularity: per_group - group_size: 128 - special: - trans: True - # The options for "trans_version" include "v1" and "v2". - trans_version: v2 - weight_clip: True - clip_sym: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/Awq/awq_w_only_mix_bits_2.yml b/configs/quantization/methods/Awq/awq_w_only_mix_bits_2.yml deleted file mode 100644 index 785d98ed8..000000000 --- a/configs/quantization/methods/Awq/awq_w_only_mix_bits_2.yml +++ /dev/null @@ -1,55 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: pileval_awq - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - mix_bits: - setting_0: - layer_name: [down_proj#0-1-2-3-28-29-30-31] - do_quant: True - weight: - bit: 8 - symmetric: False - granularity: per_group - group_size: 128 - setting_1: - layer_name: [o_proj] - do_quant: False - special: - trans: True - # The options for "trans_version" include "v1" and "v2". 
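The mix_bits syntax above packs two things into one string: a layer-name suffix and an optional "#"-separated list of block indices, e.g. down_proj#0-1-2-3-28-29-30-31 applies setting_0 only to down_proj in those blocks, while setting_1 disables quantization for every o_proj. A rough sketch of how such keys could be expanded into per-layer overrides; the parsing details are assumptions inferred from the configs, not the repo's code:

```python
# Sketch: expand "name#i-j-k" mix_bits keys into {(block index, name suffix): setting}.
def expand_mix_bits(mix_bits: dict, num_blocks: int) -> dict:
    overrides = {}
    for setting in mix_bits.values():
        for key in setting["layer_name"]:
            suffix, _, ids = key.partition("#")
            blocks = [int(i) for i in ids.split("-")] if ids else range(num_blocks)
            for b in blocks:
                overrides[(b, suffix)] = setting   # later matched against module names per block
    return overrides

cfg = {
    "setting_0": {"layer_name": ["down_proj#0-1-2-3-28-29-30-31"], "do_quant": True},
    "setting_1": {"layer_name": ["o_proj"], "do_quant": False},
}
ov = expand_mix_bits(cfg, num_blocks=32)
print(ov[(30, "down_proj")]["do_quant"], ov[(5, "o_proj")]["do_quant"])   # True False
```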
- trans_version: v2 - weight_clip: True - clip_sym: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/Awq/awq_w_only_opencompass.yml b/configs/quantization/methods/Awq/awq_w_only_opencompass.yml deleted file mode 100644 index 701f69200..000000000 --- a/configs/quantization/methods/Awq/awq_w_only_opencompass.yml +++ /dev/null @@ -1,51 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: pileval_awq - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - special: - trans: True - # The options for "trans_version" include "v1" and "v2". - # But their results don't differ significantly. - trans_version: v2 - weight_clip: True - # For 2-bit quantization, setting "clip_sym: False" will yield better results. - clip_sym: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ - -opencompass: - # eval_base.py for base model, eval_chat.py for chat model. configs can be found in llmc/configs/opencompass. - cfg_path: opencompass config path - max_num_workers: 1 - output_path: ./oc_output diff --git a/configs/quantization/methods/Awq/awq_w_only_vit.yml b/configs/quantization/methods/Awq/awq_w_only_vit.yml deleted file mode 100644 index 11bb5693d..000000000 --- a/configs/quantization/methods/Awq/awq_w_only_vit.yml +++ /dev/null @@ -1,43 +0,0 @@ -base: - seed: &seed 42 -model: - type: Vit - path: /mnt/nvme1/yongyang/models/vit-base-patch16-224 - tokenizer_mode: fast - torch_dtype: auto -calib: - name: images - download: False - path: /mnt/nvme1/yongyang/general_custom_data - n_samples: 128 - bs: 1 - apply_chat_template: False - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: imagenet - type: acc - download: False - path: /mnt/nvme1/yongyang/datasets/imagenet/val - bs: 512 -quant: - method: Awq - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - special: - trans: True - # The options for "trans_version" include "v1" and "v2". - trans_version: v2 - weight_clip: False - clip_sym: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/Awq/awq_w_only_vlm.yml b/configs/quantization/methods/Awq/awq_w_only_vlm.yml deleted file mode 100644 index 6621a18ec..000000000 --- a/configs/quantization/methods/Awq/awq_w_only_vlm.yml +++ /dev/null @@ -1,61 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: custom_mm - download: False - path: calib data path - apply_chat_template: True - add_answer: True # Defalut is False. If set it to Ture, calib data will add answers. 
- n_samples: 8 - bs: -1 - seq_len: 512 - padding: True - seed: *seed -eval: - eval_pos: [pretrain, fake_quant] - type: vqa - name: mme - download: False - path: MME dataset path - bs: 1 - inference_per_block: False -quant: - vision: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - special: - trans: True - # The options for "trans_version" include "v1" and "v2". - # But their results don't differ significantly. - trans_version: v2 - weight_clip: True - # For 2-bit quantization, setting "clip_sym: False" will yield better results. - clip_sym: True - language: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - special: - trans: True - # The options for "trans_version" include "v1" and "v2". - # But their results don't differ significantly. - trans_version: v2 - weight_clip: True - # For 2-bit quantization, setting "clip_sym: False" will yield better results. - clip_sym: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/DGQ/dgq_w_a.yml b/configs/quantization/methods/DGQ/dgq_w_a.yml deleted file mode 100644 index 319ad9e0a..000000000 --- a/configs/quantization/methods/DGQ/dgq_w_a.yml +++ /dev/null @@ -1,45 +0,0 @@ -base: - seed: &seed 0 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: wikitext2 - download: False - path: calib data path - n_samples: 1 - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [pretrain, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: DGQ - weight: - w_1: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - w_2: - bit: 8 - symmetric: True - granularity: per_channel - act: - bit: 8 - symmetric: True - granularity: per_token -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/FP_Quant/awq_we2m1a16_g128.yml b/configs/quantization/methods/FP_Quant/awq_we2m1a16_g128.yml deleted file mode 100644 index f79baeba9..000000000 --- a/configs/quantization/methods/FP_Quant/awq_we2m1a16_g128.yml +++ /dev/null @@ -1,46 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - quant_type: float-quant - bit: e2m1 - symmetric: False - granularity: per_group - group_size: 128 - use_qtorch: True - special: - quant_type: float-quant - trans: True - # The options for "trans_version" include "v1" and "v2". - trans_version: v2 - weight_clip: True - # For 2-bit quantization, setting "clip_sym: False" will yield better results. 
- clip_sym: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/FP_Quant/gptq_we2m1a16_g128.yml b/configs/quantization/methods/FP_Quant/gptq_we2m1a16_g128.yml deleted file mode 100644 index af7835098..000000000 --- a/configs/quantization/methods/FP_Quant/gptq_we2m1a16_g128.yml +++ /dev/null @@ -1,46 +0,0 @@ -base: - seed: &seed 0 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: wikitext2 - download: False - n_samples: 128 - path: calib data path - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [pretrain, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: GPTQ - weight: - quant_type: float-quant - bit: e2m1 - symmetric: True - granularity: per_group - group_size: 128 - use_qtorch: True - special: - quant_type: float-quant - actorder: True - static_groups: False - percdamp: 0.01 - blocksize: 128 - true_sequential: True - quant_out: True -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/FP_Quant/rtn_we2m1a16_g128.yml b/configs/quantization/methods/FP_Quant/rtn_we2m1a16_g128.yml deleted file mode 100644 index c55be3619..000000000 --- a/configs/quantization/methods/FP_Quant/rtn_we2m1a16_g128.yml +++ /dev/null @@ -1,27 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: RTN - weight: - quant_type: float-quant - bit: e2m1 - symmetric: True - granularity: per_group - group_size: 128 -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/FP_Quant/rtn_we2m1ae2m1.yml b/configs/quantization/methods/FP_Quant/rtn_we2m1ae2m1.yml deleted file mode 100644 index b5ea6fa22..000000000 --- a/configs/quantization/methods/FP_Quant/rtn_we2m1ae2m1.yml +++ /dev/null @@ -1,31 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". 
- bs: 1 - inference_per_block: False -quant: - method: RTN - weight: - quant_type: float-quant - bit: e2m1 - symmetric: True - granularity: per_channel - act: - quant_type: float-quant - bit: e2m1 - symmetric: True - granularity: per_token -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/FP_Quant/rtn_we4m3ae4m3.yml b/configs/quantization/methods/FP_Quant/rtn_we4m3ae4m3.yml deleted file mode 100644 index 8493780b6..000000000 --- a/configs/quantization/methods/FP_Quant/rtn_we4m3ae4m3.yml +++ /dev/null @@ -1,31 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llama - path: /mnt/nvme1/yongyang/models/llama2-7b - torch_dtype: auto -# eval: -# eval_pos: [pretrain, fake_quant] -# name: wikitext2 -# download: False -# path: /mnt/nvme0/yongyang/llm_datasets/llmc/eval/wikitext2 -# seq_len: 2048 -# # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". -# # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". -# bs: 1 -# inference_per_block: False -quant: - method: RTN - weight: - quant_type: float-quant - bit: e4m3 - symmetric: True - granularity: per_channel - act: - quant_type: float-quant - bit: e4m3 - symmetric: True - granularity: per_token -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/FP_Quant/rtn_we5m2ae5m2.yml b/configs/quantization/methods/FP_Quant/rtn_we5m2ae5m2.yml deleted file mode 100644 index ed3ea2f4e..000000000 --- a/configs/quantization/methods/FP_Quant/rtn_we5m2ae5m2.yml +++ /dev/null @@ -1,31 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: RTN - weight: - quant_type: float-quant - bit: e5m2 - symmetric: True - granularity: per_channel - act: - quant_type: float-quant - bit: e5m2 - symmetric: True - granularity: per_token -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/GPTQ/gptq_owq_w_only.yml b/configs/quantization/methods/GPTQ/gptq_owq_w_only.yml deleted file mode 100644 index ba961dfaa..000000000 --- a/configs/quantization/methods/GPTQ/gptq_owq_w_only.yml +++ /dev/null @@ -1,45 +0,0 @@ -base: - seed: &seed 0 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: c4 - download: False - n_samples: 128 - path: calib data path - bs: 1 - seq_len: 2048 - preproc: c4_gptq - seed: *seed -eval: - eval_pos: [pretrain, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". 
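The FP_Quant configs quantize to low-bit floating-point grids rather than integer grids; the bit tags name exponent/mantissa widths (e2m1 is a 4-bit float, e4m3/e5m2 are the usual FP8 variants). Assuming the common finite-only reading of e2m1, the representable magnitudes can be enumerated directly, which makes the difference from a 4-bit integer grid concrete:

```python
# Sketch: enumerate the non-negative values of an eXmY mini-float (no inf/NaN encodings).
def minifloat_values(exp_bits: int, man_bits: int):
    bias = 2 ** (exp_bits - 1) - 1
    vals = {0.0}
    for e in range(2 ** exp_bits):
        for m in range(2 ** man_bits):
            if e == 0:                         # subnormals
                vals.add(m / 2 ** man_bits * 2 ** (1 - bias))
            else:                              # normals
                vals.add((1 + m / 2 ** man_bits) * 2 ** (e - bias))
    return sorted(vals)

print(minifloat_values(2, 1))   # e2m1: [0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 6.0]
# Under this finite-only model e4m3 / e5m2 top out at 480 / 114688; the OCP FP8
# formats used in practice reserve encodings for NaN (e4m3) and inf/NaN (e5m2),
# giving the familiar 448 / 57344 maxima.
```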
- bs: 1 - inference_per_block: False -quant: - method: GPTQ - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - special: - actorder: False - static_groups: False - percdamp: 0.01 - blocksize: 128 - true_sequential: True - owq: True - #target bit is 4.01 - n_outs: [6, 6, 6, 6, 2, 2, 6] - quant_out: True -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/GPTQ/gptq_w_only.yml b/configs/quantization/methods/GPTQ/gptq_w_only.yml deleted file mode 100644 index e99a828a8..000000000 --- a/configs/quantization/methods/GPTQ/gptq_w_only.yml +++ /dev/null @@ -1,42 +0,0 @@ -base: - seed: &seed 0 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: wikitext2 - download: False - n_samples: 128 - path: calib data path - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: eval data path - bs: 1 - seq_len: 2048 - inference_per_block: False -quant: - method: GPTQ - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - # calib_algo: mse - # mse_b_num: 2 - special: - actorder: True - static_groups: False - percdamp: 0.01 - blocksize: 128 - true_sequential: True - quant_out: True -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/GPTQ/gptq_w_only_vlm.yml b/configs/quantization/methods/GPTQ/gptq_w_only_vlm.yml deleted file mode 100644 index e6d9df736..000000000 --- a/configs/quantization/methods/GPTQ/gptq_w_only_vlm.yml +++ /dev/null @@ -1,46 +0,0 @@ -base: - seed: &seed 0 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: vlm_datastes - type: img_txt - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: vlm_general - padding: True - seed: *seed -eval: - eval_pos: [pretrain, fake_quant] - type: vqa - name: mme - download: False - path: MME dataset path - bs: 1 - inference_per_block: False -quant: - method: GPTQ - quant_objects: [vision, language] # default is [language] - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - # calib_algo: mse - # mse_b_num: 2 - special: - actorder: True - static_groups: False - percdamp: 0.01 - blocksize: 128 - true_sequential: True - quant_out: True -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/HQQ/hqq_w_only.yml b/configs/quantization/methods/HQQ/hqq_w_only.yml deleted file mode 100644 index 737ebfe74..000000000 --- a/configs/quantization/methods/HQQ/hqq_w_only.yml +++ /dev/null @@ -1,33 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". 
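The gptq_owq config's "# target bit is 4.01" comment can be reproduced with simple arithmetic if one assumes, as in the OWQ approach, that the n_outs entries count per-projection outlier columns kept in 16-bit while the rest of the weights are 4-bit, and that the 7 entries map to the q/k/v/o and gate/up/down projections of a LLaMA-7B-style block. A back-of-the-envelope sketch under those assumptions (the dimensions and the n_outs ordering are assumptions, not stated in the config):

```python
# Sketch: average weight bitwidth when n_outs outlier columns per projection stay in fp16.
hidden, inter, base_bit, fp_bit = 4096, 11008, 4, 16
projs = {           # (rows, cols) per linear layer in one block
    "q": (hidden, hidden), "k": (hidden, hidden), "v": (hidden, hidden), "o": (hidden, hidden),
    "gate": (inter, hidden), "up": (inter, hidden), "down": (hidden, inter),
}
n_outs = dict(zip(projs, [6, 6, 6, 6, 2, 2, 6]))

params = sum(r * c for r, c in projs.values())
extra = sum(n_outs[p] * projs[p][0] * (fp_bit - base_bit) for p in projs)
print(round(base_bit + extra / params, 2))   # -> 4.01
```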
- bs: 1 - inference_per_block: False -quant: - method: HQQ - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - round_zp: False - special: - axis : 0 - lp_norm : 0.7 - beta : 10 - kappa : 1.01 - iters : 20 -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/KVQuant/rtn_w_a_kivi_quant_kv.yml b/configs/quantization/methods/KVQuant/rtn_w_a_kivi_quant_kv.yml deleted file mode 100644 index 9d4543872..000000000 --- a/configs/quantization/methods/KVQuant/rtn_w_a_kivi_quant_kv.yml +++ /dev/null @@ -1,35 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -eval: - eval_pos: [transformed, fake_quant, fake_quant_wo_kv] #decode_ppl eval not support pretrain eval pos - name: wikitext2 - type: decode_ppl - download: False - path: eval_data_path - bs: 1 - inference_per_block: False - num_samples: 50 - # num_eval_tokens: 3 -quant: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - kvcache: - method: Kivi - bit: 8 - symmetric: True - granularity: per_token -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/KVQuant/rtn_w_a_naive_quant_kv.yml b/configs/quantization/methods/KVQuant/rtn_w_a_naive_quant_kv.yml deleted file mode 100644 index 4a9452e18..000000000 --- a/configs/quantization/methods/KVQuant/rtn_w_a_naive_quant_kv.yml +++ /dev/null @@ -1,35 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -eval: - eval_pos: [transformed, fake_quant, fake_quant_wo_kv] #decode_ppl eval not support pretrain eval pos - name: wikitext2 - type: decode_ppl - download: False - path: eval_data_path - bs: 1 - inference_per_block: False - num_samples: 50 - # num_eval_tokens: 3 -quant: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - kvcache: - method: Naive - bit: 8 - symmetric: True - granularity: per_token -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/KVQuant/rtn_w_a_pertensor_static_naive_quant_kv.yml b/configs/quantization/methods/KVQuant/rtn_w_a_pertensor_static_naive_quant_kv.yml deleted file mode 100644 index f2bbda675..000000000 --- a/configs/quantization/methods/KVQuant/rtn_w_a_pertensor_static_naive_quant_kv.yml +++ /dev/null @@ -1,45 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [transformed, fake_quant, fake_quant_wo_kv] #long_ppl eval not support pretrain eval pos - name: wikitext2 - type: decode_ppl - download: False - path: eval_data_path - bs: 1 - inference_per_block: False - num_samples: 10 - # num_eval_tokens: 3 -quant: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_tensor - static: True - kvcache: - method: Naive - bit: 8 - symmetric: True - granularity: per_tensor -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/LlmInt8/llmint8_w_only.yml b/configs/quantization/methods/LlmInt8/llmint8_w_only.yml deleted file mode 100644 index b8047b198..000000000 --- 
a/configs/quantization/methods/LlmInt8/llmint8_w_only.yml +++ /dev/null @@ -1,41 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llama - path: model path - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [pretrain, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: LlmInt8 - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - special: - threshold: 6.0 -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/MixPrecision/awq_w_a.yml b/configs/quantization/methods/MixPrecision/awq_w_a.yml deleted file mode 100644 index 78b341878..000000000 --- a/configs/quantization/methods/MixPrecision/awq_w_a.yml +++ /dev/null @@ -1,52 +0,0 @@ -base: - seed: &seed 42 -model: - type: DeepseekV2 - path: /path/to/DeepseekV2 - torch_dtype: auto -calib: - name: pileval - download: False - path: /path/to/pileval - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: pileval_awq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: /path/to/wikitext2 - seq_len: 2048 - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - special: - trans: True - trans_version: v2 - weight_clip: False - clip_sym: True -ignored_layers: - # block_ids and layer_names together determine which layers use high precision (such as bf16 or fp16) for computation. - # For example, '4' and 'self_attn.q_proj' represent the model.layers.4.mlp.self_attn.q_proj layer using high precision, - # while '15-23' and 'self_attn.kv_b_proj' represent layers 15 to 23 of self_attn.kv_b_proj not being quantized. 
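The ignored_layers comment above mixes two addressing schemes: block_ids entries may be single indices or "start-end" ranges, layer_names are suffixes resolved inside each listed block, and speical_names (the key is spelled this way in the configs) are fully-qualified module names. Incidentally, the example in the comment reads "model.layers.4.mlp.self_attn.q_proj"; given the layer_names listed, "model.layers.4.self_attn.q_proj" is presumably what was meant. A sketch of expanding these fields into a high-precision skip set, with the "model.layers.{i}.{suffix}" template an assumption for illustration:

```python
# Sketch: turn block_ids / layer_names / speical_names into a set of modules kept in
# high precision (bf16/fp16). The module-name template is assumed, not taken from the repo.
def expand_ignored(block_ids, layer_names, special_names=()):
    blocks = []
    for bid in block_ids:
        if isinstance(bid, str) and "-" in bid:
            lo, hi = map(int, bid.split("-"))
            blocks.extend(range(lo, hi + 1))
        else:
            blocks.append(int(bid))
    skipped = {f"model.layers.{b}.{name}" for b in blocks for name in layer_names}
    return skipped | set(special_names)

skip = expand_ignored([4, 5, 6, "15-23"],
                      ["self_attn.q_proj", "self_attn.kv_b_proj"],
                      ["model.layers.0.mlp.down_proj"])
print("model.layers.23.self_attn.kv_b_proj" in skip)   # True
```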
- block_ids: [4, 5, 6, 15-23] - layer_names: ["self_attn.q_proj", "self_attn.kv_a_proj_with_mqa", "self_attn.kv_b_proj", "self_attn.o_proj"] - # You can also specify certain layers for high precision computation using speical_names, - # but you must provide the full name of the layer - speical_names: ["model.layers.0.mlp.down_proj"] -save: - save_vllm: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/MixPrecision/awq_w_a_static.yml b/configs/quantization/methods/MixPrecision/awq_w_a_static.yml deleted file mode 100644 index f88f7d3cd..000000000 --- a/configs/quantization/methods/MixPrecision/awq_w_a_static.yml +++ /dev/null @@ -1,49 +0,0 @@ -base: - seed: &seed 42 -model: - type: DeepseekV2 - path: /path/to/DeepseekV2 - torch_dtype: auto -calib: - name: pileval - download: False - path: /path/to/pileval - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: pileval_awq - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: /path/to/wikitext2 - seq_len: 2048 - bs: 1 - inference_per_block: False -quant: - method: Awq - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_tensor - static: True - calib_algo: static_hist - special: - trans: True - trans_version: v2 - weight_clip: False - clip_sym: True -ignored_layers: - block_ids: [0-26] - layer_names: ["self_attn.q_proj", "self_attn.kv_a_proj_with_mqa", "self_attn.kv_b_proj", "self_attn.o_proj"] - speical_names: [] -save: - save_vllm: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/MixPrecision/rtn_w_a.yml b/configs/quantization/methods/MixPrecision/rtn_w_a.yml deleted file mode 100644 index 6ce260b82..000000000 --- a/configs/quantization/methods/MixPrecision/rtn_w_a.yml +++ /dev/null @@ -1,38 +0,0 @@ -base: - seed: &seed 42 -model: - type: DeepseekV2 - path: /path/to/DeepseekV2 - torch_dtype: auto -eval: - eval_pos: [pretrain, fake_quant] - name: wikitext2 - download: False - path: /path/to/wikitext2 - seq_len: 2048 - bs: 1 - inference_per_block: False -quant: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token -ignored_layers: - # block_ids and layer_names together determine which layers use high precision (such as bf16 or fp16) for computation. - # For example, '4' and 'self_attn.q_proj' represent the model.layers.4.mlp.self_attn.q_proj layer using high precision, - # while '15-23' and 'self_attn.kv_b_proj' represent layers 15 to 23 of self_attn.kv_b_proj not being quantized. 
- block_ids: [4, 5, 6, 15-23] - layer_names: ["self_attn.q_proj", "self_attn.kv_a_proj_with_mqa", "self_attn.kv_b_proj", "self_attn.o_proj"] - # You can also specify certain layers for high precision computation using speical_names, - # but you must provide the full name of the layer - speical_names: ["model.layers.0.mlp.down_proj"] -save: - save_vllm: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/MixPrecision/rtn_w_a_static.yml b/configs/quantization/methods/MixPrecision/rtn_w_a_static.yml deleted file mode 100644 index 0c3702658..000000000 --- a/configs/quantization/methods/MixPrecision/rtn_w_a_static.yml +++ /dev/null @@ -1,44 +0,0 @@ -base: - seed: &seed 42 -model: - type: DeepseekV2 - path: /path/to/DeepseekV2 - torch_dtype: auto -calib: - name: pileval - download: False - path: /path/to/pileval - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [fake_quant] - name: wikitext2 - download: False - path: /path/to/wikitext2 - seq_len: 2048 - bs: 1 - inference_per_block: False -quant: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_tensor - static: True - calib_algo: static_hist -ignored_layers: - block_ids: [0-26] - layer_names: ["self_attn.q_proj", "self_attn.kv_a_proj_with_mqa", "self_attn.kv_b_proj", "self_attn.o_proj"] - speical_names: [] -save: - save_vllm: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/NormTweaking/ntweak_w_a.yml b/configs/quantization/methods/NormTweaking/ntweak_w_a.yml deleted file mode 100644 index 82b41499a..000000000 --- a/configs/quantization/methods/NormTweaking/ntweak_w_a.yml +++ /dev/null @@ -1,45 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [pretrain, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: NormTweaking - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - special: - ntweak_lr: 0.000001 - deactive_amp: True - epochs: 50 - gamma: 0.001 - quant_out: True -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/NormTweaking/ntweak_w_only.yml b/configs/quantization/methods/NormTweaking/ntweak_w_only.yml deleted file mode 100644 index ac3175fb2..000000000 --- a/configs/quantization/methods/NormTweaking/ntweak_w_only.yml +++ /dev/null @@ -1,41 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [pretrain, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". 
- # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: NormTweaking - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - special: - ntweak_lr: 0.000001 - deactive_amp: False - epochs: 50 - gamma: 0.001 - quant_out: True -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/OmniQuant/omniq_w_a.yml b/configs/quantization/methods/OmniQuant/omniq_w_a.yml deleted file mode 100644 index f1c3ddd45..000000000 --- a/configs/quantization/methods/OmniQuant/omniq_w_a.yml +++ /dev/null @@ -1,58 +0,0 @@ -base: - seed: &seed 2 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: wikitext2 - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: OmniQuant - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - calib_algo: learnable - ste: True - act: - bit: 8 - symmetric: True - granularity: per_token - ste: True - special: - aug_loss: False - let: True - lwc: True - lwc_lr: 0.01 - # Set "let_lr: 0.001" for w4a4 quantization. - let_lr: 0.005 - # Set to "True" if the model has bias (e.g. Opt). - use_shift: False - # Use "0.75" for w4a4 quantization. - alpha: 0.5 - deactive_amp: True - epochs: 20 - wd: 0 - quant_out: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/OmniQuant/omniq_w_only.yml b/configs/quantization/methods/OmniQuant/omniq_w_only.yml deleted file mode 100644 index f830ce74c..000000000 --- a/configs/quantization/methods/OmniQuant/omniq_w_only.yml +++ /dev/null @@ -1,53 +0,0 @@ -base: - seed: &seed 2 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: wikitext2 - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: wikitext2_gptq - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: OmniQuant - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - calib_algo: learnable - ste: True - special: - # Set to "True" for 2-bit quantization. - aug_loss: False - lwc: True - let: False - lwc_lr: 0.01 - let_lr: 0.005 - # Set to "True" if the model has bias (e.g. Opt). - use_shift: False - alpha: 0.5 - deactive_amp: True - # Use 40 epochs for 2-bit quantization. 
- epochs: 20 - wd: 0 - quant_out: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/OsPlus/osplus_w_a.yml b/configs/quantization/methods/OsPlus/osplus_w_a.yml deleted file mode 100644 index 67fe46a7d..000000000 --- a/configs/quantization/methods/OsPlus/osplus_w_a.yml +++ /dev/null @@ -1,42 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 1 - bs: 1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: OsPlus - weight: - bit: 8 - symmetric: True - granularity: per_channel - act: - bit: 8 - symmetric: True - granularity: per_token - special: - true_sequential: True - quant_out: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/QUIK/quik_w_a.yml b/configs/quantization/methods/QUIK/quik_w_a.yml deleted file mode 100644 index 4323b94f8..000000000 --- a/configs/quantization/methods/QUIK/quik_w_a.yml +++ /dev/null @@ -1,44 +0,0 @@ -base: - seed: &seed 0 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: c4 - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: c4_gptq - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: QUIK - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - special: - fp_relative: False - fp_features: 256 - fp_threshold: 0.0 - last_fc_bit: 8 -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/QuaRot/quarot_w_a.yml b/configs/quantization/methods/QuaRot/quarot_w_a.yml deleted file mode 100644 index 2fcb84108..000000000 --- a/configs/quantization/methods/QuaRot/quarot_w_a.yml +++ /dev/null @@ -1,36 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". 
- bs: 1 - inference_per_block: False -quant: - method: Quarot - weight: - bit: 8 - symmetric: False - granularity: per_channel - group_size: -1 - calib_algo: minmax - act: - bit: 8 - symmetric: False - granularity: per_token - special: - rotate_mode: hadamard - fp32_had: True - online_rotate: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/RTN/rtn_w_a.yml b/configs/quantization/methods/RTN/rtn_w_a.yml deleted file mode 100644 index 241b4e36a..000000000 --- a/configs/quantization/methods/RTN/rtn_w_a.yml +++ /dev/null @@ -1,30 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/RTN/rtn_w_a_block.yml b/configs/quantization/methods/RTN/rtn_w_a_block.yml deleted file mode 100755 index 1dc38ef1c..000000000 --- a/configs/quantization/methods/RTN/rtn_w_a_block.yml +++ /dev/null @@ -1,26 +0,0 @@ -base: - seed: &seed 42 -model: - type: DeepseekV3 - path: /path/DeepSeek-R1-bf16 - tokenizer_mode: fast - torch_dtype: auto -quant: - method: RTN - weight: - quant_type: float-quant - bit: e4m3 - symmetric: True - granularity: per_block - block_size: 128 - use_qtorch: True - act: - quant_type: float-quant - bit: e4m3 - symmetric: True - granularity: per_group - group_size: 128 - use_qtorch: True -save: - save_vllm: True - save_path: /path/to/save/ diff --git a/configs/quantization/methods/RTN/rtn_w_a_kv.yml b/configs/quantization/methods/RTN/rtn_w_a_kv.yml deleted file mode 100644 index 3ec0870b6..000000000 --- a/configs/quantization/methods/RTN/rtn_w_a_kv.yml +++ /dev/null @@ -1,35 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". 
- bs: 1 - inference_per_block: False -quant: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - kvcache: - method: Naive - bit: 8 - symmetric: True - granularity: per_token -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/RTN/rtn_w_a_pertensor_static.yml b/configs/quantization/methods/RTN/rtn_w_a_pertensor_static.yml deleted file mode 100644 index 64b2eab70..000000000 --- a/configs/quantization/methods/RTN/rtn_w_a_pertensor_static.yml +++ /dev/null @@ -1,41 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: 1 - seq_len: 2048 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [pretrain, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_tensor - static: True - calib_algo: static_hist -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/RTN/rtn_w_a_vlm.yml b/configs/quantization/methods/RTN/rtn_w_a_vlm.yml deleted file mode 100644 index 728e21344..000000000 --- a/configs/quantization/methods/RTN/rtn_w_a_vlm.yml +++ /dev/null @@ -1,29 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, fake_quant] - type: vqa - name: mme - download: False - path: MME dataset path - bs: 1 - inference_per_block: False -quant: - method: RTN - quant_objects: [vision, language] # default is [language] - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/RTN/rtn_w_a_wint4afp8.yml b/configs/quantization/methods/RTN/rtn_w_a_wint4afp8.yml deleted file mode 100644 index 4156850c8..000000000 --- a/configs/quantization/methods/RTN/rtn_w_a_wint4afp8.yml +++ /dev/null @@ -1,40 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llama - path: /mnt/nvme1/yongyang/models/llama2-7b - torch_dtype: auto -eval: - eval_pos: [pretrain, fake_quant] - name: wikitext2 - download: False - path: /mnt/nvme0/yongyang/llm_datasets/llmc/eval/wikitext2 - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". 
- bs: 1 - inference_per_block: False -quant: - method: RTN - weight: - bit: 48 - bit4: - symmetric: False - granularity: per_group - group_size: 128 - scales_bit: 8 - scales_symmetric: True - zeros_bit: 8 - zeros_symmetric: True - bit8: - symmetric: True - granularity: per_channel - int_range: [-120, 120] - act: - quant_type: float-quant - bit: e4m3 - symmetric: True - granularity: per_token -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/RTN/rtn_w_a_wint4aint8.yml b/configs/quantization/methods/RTN/rtn_w_a_wint4aint8.yml deleted file mode 100644 index 237c21181..000000000 --- a/configs/quantization/methods/RTN/rtn_w_a_wint4aint8.yml +++ /dev/null @@ -1,39 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llama - path: /mnt/nvme1/yongyang/models/llama2-7b - torch_dtype: auto -eval: - eval_pos: [pretrain, fake_quant] - name: wikitext2 - download: False - path: /mnt/nvme0/yongyang/llm_datasets/llmc/eval/wikitext2 - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: RTN - weight: - bit: 48 - bit4: - symmetric: False - granularity: per_group - group_size: 128 - scales_bit: 8 - scales_symmetric: True - zeros_bit: 8 - zeros_symmetric: True - bit8: - symmetric: True - granularity: per_channel - int_range: [-120, 120] - act: - bit: 8 - symmetric: True - granularity: per_token -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/RTN/rtn_w_only.yml b/configs/quantization/methods/RTN/rtn_w_only.yml deleted file mode 100644 index de59f63ed..000000000 --- a/configs/quantization/methods/RTN/rtn_w_only.yml +++ /dev/null @@ -1,26 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". - bs: 1 - inference_per_block: False -quant: - method: RTN - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/SmoothQuant/smoothquant_w_a.yml b/configs/quantization/methods/SmoothQuant/smoothquant_w_a.yml deleted file mode 100644 index fa6ee64f2..000000000 --- a/configs/quantization/methods/SmoothQuant/smoothquant_w_a.yml +++ /dev/null @@ -1,41 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 512 - bs: 1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". 
- bs: 1 - inference_per_block: False -quant: - method: SmoothQuant - weight: - bit: 8 - symmetric: True - granularity: per_channel - act: - bit: 8 - symmetric: True - granularity: per_token - special: - alpha: 0.8 -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/SmoothQuant/smoothquant_w_a_vlm.yml b/configs/quantization/methods/SmoothQuant/smoothquant_w_a_vlm.yml deleted file mode 100644 index caf962478..000000000 --- a/configs/quantization/methods/SmoothQuant/smoothquant_w_a_vlm.yml +++ /dev/null @@ -1,43 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: vlm_datastes - type: img_txt - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: vlm_general - padding: True - seed: *seed -eval: - eval_pos: [pretrain, fake_quant] - type: vqa - name: mme - download: False - path: MME dataset path - bs: 1 - inference_per_block: False -quant: - method: SmoothQuant - quant_objects: [vision, language] - weight: - bit: 8 - symmetric: True - granularity: per_channel - act: - bit: 8 - symmetric: True - granularity: per_token - special: - alpha: 0.8 -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/SpQR/spqr_w_only.yml b/configs/quantization/methods/SpQR/spqr_w_only.yml deleted file mode 100644 index 723e4a898..000000000 --- a/configs/quantization/methods/SpQR/spqr_w_only.yml +++ /dev/null @@ -1,56 +0,0 @@ -base: - seed: &seed 0 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: c4 - download: False - n_samples: 128 - path: calib data path - bs: 1 - seq_len: 2048 - preproc: c4_gptq - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: eval data path - seq_len: 2048 - # For 7B / 13B model eval, bs can be set to "1", and inference_per_block can be set to "False". - # For 70B model eval, bs can be set to "20", and inference_per_block can be set to "True". 
- bs: 1 - inference_per_block: False -quant: - method: SpQR - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 16 - round_zp: False - special: - actorder: True - percdamp: 1 - blocksize: 128 - true_sequential: True - relative_threshold: 0.2 - simplified_outliers: False - scale: - bit: 3 - symmetric: False - granularity: per_group - group_size: 16 - round_zp: False - zero: - bit: 3 - symmetric: False - granularity: per_group - group_size: 16 - round_zp: False - quant_out: True -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/methods/Tesseraq/tesseraq_w_only.yml b/configs/quantization/methods/Tesseraq/tesseraq_w_only.yml deleted file mode 100644 index fdf8a9b1a..000000000 --- a/configs/quantization/methods/Tesseraq/tesseraq_w_only.yml +++ /dev/null @@ -1,52 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: auto -calib: - name: c4 - download: False - n_samples: 256 - path: calib data path - bs: 1 - seq_len: 2048 - preproc: c4_gptq - seed: *seed -eval: - eval_pos: [fake_quant] # - name: [wikitext2, c4] - download: False - path: eval data path - bs: 1 - seq_len: 2048 - inference_per_block: False -quant: - method: TesseraQ - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - calib_algo: learnable - ste: True - special: - lr: 0.0005 - iterations: 250 - wd: 0.0 - batch_size: 2 - deactive_amp: False - aug_loss: False - optimize_scale: True - scale_lr: 0.0005 - thresholds: [0.75, 0.5, 0.375, 0.25, 0.125, 0.09, 0.06, 0.04, 0.02, 0.005] - weight_clip: True - load_transform: False - reduce_memory: True - clip_version: v1 - scale_path: /path/to/scale - clip_path: /path/to/clip - quant_out: True -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/quantization/video_gen/wan_i2v/awq_w_a.yaml b/configs/quantization/video_gen/wan_i2v/awq_w_a.yaml deleted file mode 100755 index 680fab43b..000000000 --- a/configs/quantization/video_gen/wan_i2v/awq_w_a.yaml +++ /dev/null @@ -1,49 +0,0 @@ -base: - seed: &seed 42 -model: - type: WanI2V - path: /path/to/model - torch_dtype: auto -calib: - name: i2v - download: False - path: ../assets/wan_i2v/calib/ - sample_steps: 40 - bs: 1 - target_height: 480 - target_width: 832 - num_frames: 81 - guidance_scale: 5.0 - seed: *seed -eval: - eval_pos: [fake_quant] - type: video_gen - name: i2v - download: False - path: ../assets/wan_i2v/eval/ - bs: 1 - target_height: 480 - target_width: 832 - num_frames: 81 - guidance_scale: 5.0 - output_video_path: ./output_videos_awq/ -quant: - video_gen: - method: Awq - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - special: - trans: True - trans_version: v2 - weight_clip: False - clip_sym: True -save: - save_lightx2v: True - save_path: /path/to/x2v/ diff --git a/configs/quantization/video_gen/wan_i2v/rtn_w_a.yaml b/configs/quantization/video_gen/wan_i2v/rtn_w_a.yaml deleted file mode 100755 index 15ff69a45..000000000 --- a/configs/quantization/video_gen/wan_i2v/rtn_w_a.yaml +++ /dev/null @@ -1,32 +0,0 @@ -base: - seed: &seed 42 -model: - type: WanI2V - path: /path/to/model - torch_dtype: auto -eval: - eval_pos: [fake_quant] - type: video_gen - name: i2v - download: False - path: ../assets/wan_i2v/eval/ - bs: 1 - target_height: 480 - target_width: 832 - num_frames: 81 - guidance_scale: 5.0 - output_video_path: ./output_videos_rtn/ -quant: - video_gen: - method: RTN - weight: - bit: 8 - 
symmetric: True - granularity: per_channel - act: - bit: 8 - symmetric: True - granularity: per_token -save: - save_lightx2v: True - save_path: /path/to/x2v/ diff --git a/configs/quantization/video_gen/wan_i2v/rtn_w_a_lora.yaml b/configs/quantization/video_gen/wan_i2v/rtn_w_a_lora.yaml deleted file mode 100755 index 39925db7e..000000000 --- a/configs/quantization/video_gen/wan_i2v/rtn_w_a_lora.yaml +++ /dev/null @@ -1,33 +0,0 @@ -base: - seed: &seed 42 -model: - type: WanI2V - path: /path/to/model - lora_path: /path/to/lora_weights - torch_dtype: auto -eval: - eval_pos: [fake_quant] - type: video_gen - name: i2v - download: False - path: ../assets/wan_i2v/eval/ - bs: 1 - target_height: 480 - target_width: 832 - num_frames: 81 - guidance_scale: 5.0 - output_video_path: ./output_videos_rtn_lora/ -quant: - video_gen: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_channel - act: - bit: 8 - symmetric: True - granularity: per_token -save: - save_lightx2v: True - save_path: /path/to/x2v/ diff --git a/configs/quantization/video_gen/wan_i2v/smoothquant_w_a.yaml b/configs/quantization/video_gen/wan_i2v/smoothquant_w_a.yaml deleted file mode 100755 index e68dea80e..000000000 --- a/configs/quantization/video_gen/wan_i2v/smoothquant_w_a.yaml +++ /dev/null @@ -1,45 +0,0 @@ -base: - seed: &seed 42 -model: - type: WanI2V - path: /path/to/model - torch_dtype: auto -calib: - name: i2v - download: False - path: ../assets/wan_i2v/calib/ - sample_steps: 40 - bs: 1 - target_height: 480 - target_width: 832 - num_frames: 81 - guidance_scale: 5.0 - seed: *seed -eval: - eval_pos: [fake_quant] - type: video_gen - name: i2v - download: False - path: ../assets/wan_i2v/eval/ - bs: 1 - target_height: 480 - target_width: 832 - num_frames: 81 - guidance_scale: 5.0 - output_video_path: ./output_videos_sq/ -quant: - video_gen: - method: SmoothQuant - weight: - bit: 8 - symmetric: True - granularity: per_channel - act: - bit: 8 - symmetric: True - granularity: per_token - special: - alpha: 0.75 -save: - save_lightx2v: True - save_path: /path/to/x2v/ diff --git a/configs/quantization/video_gen/wan_i2v/smoothquant_w_a_fp8.yaml b/configs/quantization/video_gen/wan_i2v/smoothquant_w_a_fp8.yaml deleted file mode 100755 index e1cdc989a..000000000 --- a/configs/quantization/video_gen/wan_i2v/smoothquant_w_a_fp8.yaml +++ /dev/null @@ -1,49 +0,0 @@ -base: - seed: &seed 42 -model: - type: WanI2V - path: /path/to/model - torch_dtype: auto -calib: - name: i2v - download: False - path: ../assets/wan_i2v/calib/ - sample_steps: 40 - bs: 1 - target_height: 480 - target_width: 832 - num_frames: 81 - guidance_scale: 5.0 - seed: *seed -eval: - eval_pos: [fake_quant] - type: video_gen - name: i2v - download: False - path: ../assets/wan_i2v/eval/ - bs: 1 - target_height: 480 - target_width: 832 - num_frames: 81 - guidance_scale: 5.0 - output_video_path: ./output_videos_sq/ -quant: - video_gen: - method: SmoothQuant - weight: - quant_type: float-quant - bit: e4m3 - symmetric: True - granularity: per_channel - use_qtorch: True - act: - quant_type: float-quant - bit: e4m3 - symmetric: True - granularity: per_token - use_qtorch: True - special: - alpha: 0.75 -save: - save_lightx2v: True - save_path: /path/to/x2v/ diff --git a/configs/quantization/video_gen/wan_i2v/smoothquant_w_a_int8_lora.yaml b/configs/quantization/video_gen/wan_i2v/smoothquant_w_a_int8_lora.yaml deleted file mode 100755 index 6df416f77..000000000 --- a/configs/quantization/video_gen/wan_i2v/smoothquant_w_a_int8_lora.yaml +++ /dev/null @@ -1,46 +0,0 @@ 
-base: - seed: &seed 42 -model: - type: WanI2V - path: /path/to/model - lora_path: /path/to/lora_weights - torch_dtype: auto -calib: - name: i2v - download: False - path: ../assets/wan_i2v/calib/ - sample_steps: 40 - bs: 1 - target_height: 480 - target_width: 832 - num_frames: 81 - guidance_scale: 5.0 - seed: *seed -eval: - eval_pos: [fake_quant] - type: video_gen - name: i2v - download: False - path: ../assets/wan_i2v/eval/ - bs: 1 - target_height: 480 - target_width: 832 - num_frames: 81 - guidance_scale: 5.0 - output_video_path: ./output_videos_sq/ -quant: - video_gen: - method: SmoothQuant - weight: - bit: 8 - symmetric: True - granularity: per_channel - act: - bit: 8 - symmetric: True - granularity: per_token - special: - alpha: 0.75 -save: - save_lightx2v: True - save_path: /path/to/x2v/ diff --git a/configs/quantization/video_gen/wan_t2v/awq_w_a.yaml b/configs/quantization/video_gen/wan_t2v/awq_w_a.yaml deleted file mode 100755 index 14d05479d..000000000 --- a/configs/quantization/video_gen/wan_t2v/awq_w_a.yaml +++ /dev/null @@ -1,49 +0,0 @@ -base: - seed: &seed 42 -model: - type: WanT2V - path: /path/to/wan_t2v - torch_dtype: auto -calib: - name: t2v - download: False - path: ../assets/wan_t2v/calib/ - sample_steps: 20 - bs: 1 - target_height: 480 - target_width: 832 - num_frames: 81 - guidance_scale: 5.0 - seed: *seed -eval: - eval_pos: [transformed, fake_quant] - type: video_gen - name: t2v - download: False - path: ../assets/wan_t2v/calib/ - bs: 1 - target_height: 480 - target_width: 832 - num_frames: 81 - guidance_scale: 5.0 - output_video_path: ./output_videos_awq/ -quant: - video_gen: - method: Awq - weight: - bit: 6 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 6 - symmetric: True - granularity: per_token - special: - trans: True - trans_version: v2 - weight_clip: True - clip_sym: True -save: - save_lightx2v: True - save_path: /path/to/x2v/ diff --git a/configs/quantization/video_gen/wan_t2v/rtn_w_a.yaml b/configs/quantization/video_gen/wan_t2v/rtn_w_a.yaml deleted file mode 100755 index b6a53b0e0..000000000 --- a/configs/quantization/video_gen/wan_t2v/rtn_w_a.yaml +++ /dev/null @@ -1,32 +0,0 @@ -base: - seed: &seed 42 -model: - type: WanT2V - path: /path/to/wan_t2v - torch_dtype: auto -eval: - eval_pos: [transformed, fake_quant] - type: video_gen - name: t2v - download: False - path: ../assets/wan_t2v/eval/ - bs: 1 - target_height: 480 - target_width: 832 - num_frames: 81 - guidance_scale: 5.0 - output_video_path: ./output_videos_rtn/ -quant: - video_gen: - method: RTN - weight: - bit: 6 - symmetric: True - granularity: per_channel - act: - bit: 6 - symmetric: True - granularity: per_token -save: - save_lightx2v: True - save_path: /path/to/x2v/ diff --git a/configs/quantization/video_gen/wan_t2v/smoothquant_w_a.yaml b/configs/quantization/video_gen/wan_t2v/smoothquant_w_a.yaml deleted file mode 100755 index 7d65f31fc..000000000 --- a/configs/quantization/video_gen/wan_t2v/smoothquant_w_a.yaml +++ /dev/null @@ -1,45 +0,0 @@ -base: - seed: &seed 42 -model: - type: WanT2V - path: /path/to/wan_t2v - torch_dtype: auto -calib: - name: t2v - download: False - path: ../assets/wan_t2v/calib/ - sample_steps: 20 - bs: 1 - target_height: 480 - target_width: 832 - num_frames: 81 - guidance_scale: 5.0 - seed: *seed -eval: - eval_pos: [transformed, fake_quant] - type: video_gen - name: t2v - download: False - path: ../assets/wan_t2v/calib/ - bs: 1 - target_height: 480 - target_width: 832 - num_frames: 81 - guidance_scale: 5.0 - output_video_path: 
./output_videos_sq/ -quant: - video_gen: - method: SmoothQuant - weight: - bit: 6 - symmetric: True - granularity: per_channel - act: - bit: 6 - symmetric: True - granularity: per_token - special: - alpha: 0.7 -save: - save_lightx2v: True - save_path: /path/to/x2v/ diff --git a/configs/sparsification/methods/DART/dart.yml b/configs/sparsification/methods/DART/dart.yml deleted file mode 100644 index 104f560c5..000000000 --- a/configs/sparsification/methods/DART/dart.yml +++ /dev/null @@ -1,26 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llava - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, transformed] - type: vqa - name: [mme] - download: False - path: MME dataset path - bs: 1 - inference_per_block: False -sparse: - method: TokenReduction - special: - method: DART - pruning_loc: 5 - reduction_ratio: 0.7778 - pivot_image_token: 4 - pivot_text_token : 4 -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/sparsification/methods/DivPrune/divprune.yml b/configs/sparsification/methods/DivPrune/divprune.yml deleted file mode 100644 index 0234c0bd4..000000000 --- a/configs/sparsification/methods/DivPrune/divprune.yml +++ /dev/null @@ -1,23 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llava - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, transformed] - type: vqa - name: [mme] - download: False - path: MME dataset path - bs: 1 - inference_per_block: False -sparse: - method: TokenReduction - special: - method: DivPrune - reduction_ratio: 0.9444 # 0.7778 0.8889 0.9444 -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/sparsification/methods/DyCoke/dycoke.yml b/configs/sparsification/methods/DyCoke/dycoke.yml deleted file mode 100644 index b945fa538..000000000 --- a/configs/sparsification/methods/DyCoke/dycoke.yml +++ /dev/null @@ -1,26 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llava OneVision - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, transformed] - type: vqa - name: [mme] - download: False - path: MME dataset path - bs: 1 - inference_per_block: False -sparse: - method: TokenReduction - special: - method: DyCoke - dycoke_layer_idx: 3 - num_tokens_per_frame: 196 - merging_ratio: 0.7 - dycoke_radio: 0.7 -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/sparsification/methods/FastV/fastv.yml b/configs/sparsification/methods/FastV/fastv.yml deleted file mode 100644 index 4253eb003..000000000 --- a/configs/sparsification/methods/FastV/fastv.yml +++ /dev/null @@ -1,24 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llava - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, transformed] - type: vqa - name: [mme] - download: False - path: MME dataset path - bs: 1 - inference_per_block: False -sparse: - method: TokenReduction - special: - method: FastV - pruning_loc: 3 - rate: 0.778 # prune_rate -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/sparsification/methods/FastVID/fastvid.yml b/configs/sparsification/methods/FastVID/fastvid.yml deleted file mode 100644 index 6a38f9199..000000000 --- a/configs/sparsification/methods/FastVID/fastvid.yml +++ /dev/null @@ -1,28 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llava OneVision - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, transformed] - type: vqa - name: [mme] - download: False - path: MME dataset path - bs: 1 - inference_per_block: False -sparse: - method: 
TokenReduction - special: - method: FastVID - retention_ratio: 0.10 - DySeg_c: 8 - DySeg_tau: 0.9 - STPrune_d: 0.4 - DTM_p: 4 - DTM_alpha: 0.6 -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/sparsification/methods/FasterVLM/fastervlm.yml b/configs/sparsification/methods/FasterVLM/fastervlm.yml deleted file mode 100644 index 0b2aaf43d..000000000 --- a/configs/sparsification/methods/FasterVLM/fastervlm.yml +++ /dev/null @@ -1,23 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llava - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, transformed] - type: vqa - name: [mme] - download: False - path: MME dataset path - bs: 1 - inference_per_block: False -sparse: - method: TokenReduction - special: - method: FasterVLM - rate: 0.778 -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/sparsification/methods/Holitom/holitom.yml b/configs/sparsification/methods/Holitom/holitom.yml deleted file mode 100644 index 344c705f5..000000000 --- a/configs/sparsification/methods/Holitom/holitom.yml +++ /dev/null @@ -1,26 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llava OneVision - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, transformed] - type: vqa - name: [mme] - download: False - path: MME dataset path - bs: 1 - inference_per_block: False -sparse: - method: TokenReduction - special: - method: HoliTom - RETAIN_RATIO: 0.20 - T: 0.65 - HOLITOM_k: 18 - HOLITOM_r: 0.5 -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/sparsification/methods/Kvsparse/shadowkv.yml b/configs/sparsification/methods/Kvsparse/shadowkv.yml deleted file mode 100644 index 2a35ba494..000000000 --- a/configs/sparsification/methods/Kvsparse/shadowkv.yml +++ /dev/null @@ -1,22 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: torch.bfloat16 -eval: - eval_pos: [transformed] - name: wikitext2 - download: False - path: eval_data_path - bs: 1 - seq_len: 2048 -sparse: - method: Dense - kvcache: - method: ShadowKV - replace_attn: True - sparsity_out: False -save: - save_trans: False - save_path: ./save diff --git a/configs/sparsification/methods/Kvsparse/sinkkv.yml b/configs/sparsification/methods/Kvsparse/sinkkv.yml deleted file mode 100644 index c7800cc10..000000000 --- a/configs/sparsification/methods/Kvsparse/sinkkv.yml +++ /dev/null @@ -1,25 +0,0 @@ -base: - seed: &seed 42 -model: - type: model_type - path: model path - torch_dtype: torch.bfloat16 -eval: - eval_pos: [transformed] - name: wikitext2 - type: decode_ppl - download: False - path: eval_data_path - bs: 1 - inference_per_block: False - num_samples: 50 - # num_eval_tokens: 3 -sparse: - method: Dense - kvcache: - method: SinkKV - window_length: 256 - num_sink_tokens: 4 -save: - save_fake: False - save_path: /path/to/save/ diff --git a/configs/sparsification/methods/Magnitude/magnitude.yml b/configs/sparsification/methods/Magnitude/magnitude.yml deleted file mode 100644 index 98c23eb02..000000000 --- a/configs/sparsification/methods/Magnitude/magnitude.yml +++ /dev/null @@ -1,29 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llama - path: model path - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [transformed] - name: wikitext2 - download: False - path: eval data path - bs: 1 - seq_len: 2048 -sparse: - method: Magnitude - weight: 
- sparsity: 0.5 -save: - save_trans: False - save_path: ./save diff --git a/configs/sparsification/methods/MustDrop/mustdrop.yml b/configs/sparsification/methods/MustDrop/mustdrop.yml deleted file mode 100644 index 87731faeb..000000000 --- a/configs/sparsification/methods/MustDrop/mustdrop.yml +++ /dev/null @@ -1,25 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llava - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, transformed] - type: vqa - name: [mme] - download: False - path: MME dataset path - bs: 1 -sparse: - vision: - method: TokenReduction - special: - method: MustDrop - spatial_threshold: 0.6 - window_size: [3, 3] - retained_tokens: 128 # llava_next: 128, 64, 32 llava: 192, 128, 64 -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/sparsification/methods/PruneVid/prunevid.yml b/configs/sparsification/methods/PruneVid/prunevid.yml deleted file mode 100644 index 67f18b04d..000000000 --- a/configs/sparsification/methods/PruneVid/prunevid.yml +++ /dev/null @@ -1,28 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llava OneVision - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, transformed] - type: vqa - name: [mme] - download: False - path: MME dataset path - bs: 1 - inference_per_block: False -sparse: - method: TokenReduction - special: - method: PruneVid - lora_alpha: 14 - selected_layers: 10 - alphas: 0.4 - taus: 0.8 - temporal_segment_ratios: 0.25 - cluster_ratios: 0.5 -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/sparsification/methods/PyramidDrop/pyramidrop.yml b/configs/sparsification/methods/PyramidDrop/pyramidrop.yml deleted file mode 100644 index 39694ee41..000000000 --- a/configs/sparsification/methods/PyramidDrop/pyramidrop.yml +++ /dev/null @@ -1,24 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llava - path: model path - torch_dtype: auto -eval: - eval_pos: [transformed] - type: vqa - name: [mme] - download: False - path: MME dataset path - bs: 1 - inference_per_block: False -sparse: - method: TokenReduction - special: - method: PyramidDrop - image_token_ratio_list: [0.5, 0.25, 0.125] - layer_list: [8, 16, 24] -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/sparsification/methods/ShortGPT/shortgpt.yml b/configs/sparsification/methods/ShortGPT/shortgpt.yml deleted file mode 100644 index 05f285a90..000000000 --- a/configs/sparsification/methods/ShortGPT/shortgpt.yml +++ /dev/null @@ -1,29 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llama - path: model path - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [transformed] - name: [wikitext2, c4] - download: False - path: eval data path - seq_len: 2048 -sparse: - method: ShortGPT - weight: - n_prune_layers: 9 -save: - save_trans: False - save_lightllm: False - save_path: ./save diff --git a/configs/sparsification/methods/SparseVLM/sparsevlm.yml b/configs/sparsification/methods/SparseVLM/sparsevlm.yml deleted file mode 100644 index e2c117ee4..000000000 --- a/configs/sparsification/methods/SparseVLM/sparsevlm.yml +++ /dev/null @@ -1,25 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llava - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, transformed] - type: vqa - name: [mme] - download: False - path: MME dataset path - bs: 1 - inference_per_block: False -sparse: - method: TokenReduction - 
special: - method: SparseVLM - pruning_loc: [2, 6, 15] - reduction_ratio: 0.6667 - merge_flag: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/sparsification/methods/SparseVLM/sparsevlm_multi_turn.yml b/configs/sparsification/methods/SparseVLM/sparsevlm_multi_turn.yml deleted file mode 100644 index 2fc436d74..000000000 --- a/configs/sparsification/methods/SparseVLM/sparsevlm_multi_turn.yml +++ /dev/null @@ -1,29 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llava - path: model path - torch_dtype: auto -eval: - eval_pos: [transformed] # transformed - name: custom_gen - type: just_infer - download: False - path: /data/nvme1/yongyang/projects/llmc_plus/general_custom_data - apply_chat_template: True - bs: 1 - inference_per_block: False - max_new_tokens: 512 - statistics: False -sparse: - method: TokenReduction - special: - method: SparseVLM - pruning_loc: [2, 6, 15] - retained_tokens: 192 - prune_flag: True - merge_flag: True -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/sparsification/methods/ToMe/tome.yml b/configs/sparsification/methods/ToMe/tome.yml deleted file mode 100644 index b4186bc26..000000000 --- a/configs/sparsification/methods/ToMe/tome.yml +++ /dev/null @@ -1,25 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llava - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, transformed] - type: vqa - name: [mme] - download: False - path: MME dataset path - bs: 1 - inference_per_block: False -sparse: - vision: - method: TokenReduction - special: - method: ToMe - r: 16 # 16 or (16, -1.0) or [16,16,16,16,16,16] - -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/sparsification/methods/VisPruner/vispruner.yml b/configs/sparsification/methods/VisPruner/vispruner.yml deleted file mode 100644 index eef5ffc88..000000000 --- a/configs/sparsification/methods/VisPruner/vispruner.yml +++ /dev/null @@ -1,26 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llava - path: model path - torch_dtype: auto -eval: - - eval_pos: [pretrain, transformed] - type: vqa - name: [mme] - download: False - path: MME dataset path - bs: 1 - inference_per_block: False -sparse: - vision: - method: TokenReduction - special: - method: VisPruner - prune_ratio: 0.7778 # 0.6667 0.7778 0.8889 - important_ratio: 0.5 -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/sparsification/methods/VisionZip/visionzip.yml b/configs/sparsification/methods/VisionZip/visionzip.yml deleted file mode 100644 index 19bf9d9e6..000000000 --- a/configs/sparsification/methods/VisionZip/visionzip.yml +++ /dev/null @@ -1,27 +0,0 @@ -base: - seed: &seed 42 -model: - type: Llava - path: model path - torch_dtype: auto -eval: - eval_pos: [transformed] - type: vqa - name: [mme] - download: False - path: MME dataset path - bs: 1 - inference_per_block: False -sparse: - vision: - method: TokenReduction - special: - method: VisionZip # retain - dominant: 162 # visual_tokens = dominan_tokens + contextual - contextual: 30 # llava: 162+30,108+20,54+10 llava_next: 108+20,54+10,27+5 - prune_only: False - merge_only: False -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ diff --git a/configs/sparsification/methods/Wanda/wanda.yml b/configs/sparsification/methods/Wanda/wanda.yml deleted file mode 100644 index a1082bd3e..000000000 --- a/configs/sparsification/methods/Wanda/wanda.yml +++ /dev/null @@ -1,30 +0,0 @@ -base: - seed: &seed 42 
-model: - type: Llama - path: model path - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: txt_general_preproc - seed: *seed -eval: - eval_pos: [transformed] - name: [wikitext2, c4] - download: False - path: eval data path - bs: 1 - seq_len: 2048 -sparse: - method: Wanda - weight: - sparsity: 0.5 - sparsity_out: False -save: - save_trans: False - save_path: ./save diff --git a/docs/API_guide.md b/docs/API_guide.md new file mode 100644 index 000000000..de2f1420f --- /dev/null +++ b/docs/API_guide.md @@ -0,0 +1,198 @@ +# TemplateAPI Usage Guide + +The `TemplateAPI` class is a versatile superclass designed to facilitate the integration of various API-based language models into the lm-evaluation-harness framework. This guide will explain how to use and extend the `TemplateAPI` class to implement your own API models. If your API implements the OpenAI API you can use the `local-completions` or the `local-chat-completions` (defined [here](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/models/openai_completions.py)) model types, which can also serve as examples of how to effectively subclass this template. + +## Overview + +The `TemplateAPI` class provides a template for creating API-based model implementations. It handles common functionalities such as: + +- Tokenization (optional) +- Batch processing +- Caching +- Retrying failed requests +- Parsing API responses + +To use this class, you typically need to subclass it and implement specific methods for your API. + +## Key Methods to Implement + +When subclassing `TemplateAPI`, you need to implement the following methods: + +1. `_create_payload`: Creates the JSON payload for API requests. +2. `parse_logprobs`: Parses log probabilities from API responses. +3. `parse_generations`: Parses generated text from API responses. +4. `headers`: Returns the headers for the API request. + +You may also need to override other methods or properties depending on your API's specific requirements. + +> [!NOTE] +> Currently loglikelihood and MCQ based tasks (such as MMLU) are only supported for completion endpoints. Not for chat-completion — those that expect a list of dicts — endpoints! Completion APIs which support instruct tuned models can be evaluated with the `--apply_chat_template` option in order to simultaneously evaluate models using a chat template format while still being able to access the model logits needed for loglikelihood-based tasks. + +# TemplateAPI Usage Guide + +## TemplateAPI Arguments + +When initializing a `TemplateAPI` instance or a subclass, you can provide several arguments to customize its behavior. Here's a detailed explanation of some important arguments: + +- `model` or `pretrained` (str): + - The name or identifier of the model to use. + - `model` takes precedence over `pretrained` when both are provided. + +- `base_url` (str): + - The base URL for the API endpoint. + +- `tokenizer` (str, optional): + - The name or path of the tokenizer to use. + - If not provided, it defaults to using the same tokenizer name as the model. + +- `num_concurrent` (int): + - Number of concurrent requests to make to the API. + - Useful for APIs that support parallel processing. + - Default is 1 (sequential processing). + +- `tokenized_requests` (bool): + - Determines whether the input is pre-tokenized. Defaults to `True`. + - Requests can be sent in either tokenized form (`list[list[int]]`) or as text (`list[str]`, or `str` for batch_size=1). 
+ - For loglikelihood-based tasks, prompts require tokenization to calculate the context length. If `False` prompts are decoded back to text before being sent to the API. + - Not as important for `generate_until` tasks. + - Ignored for chat formatted inputs (list[dict...]) or if tokenizer_backend is None. + +- `tokenizer_backend` (str, optional): + - Required for loglikelihood-based or MCQ tasks. + - Specifies the tokenizer library to use. Options are "tiktoken", "huggingface", or None. + - Default is "huggingface". + +- `max_length` (int, optional): + - Maximum length of input + output. + - Default is 2048. + +- `max_retries` (int, optional): + - Maximum number of retries for failed API requests. + - Default is 3. + +- `max_gen_toks` (int, optional): + - Maximum number of tokens to generate in completion tasks. + - Default is 256 or set in task yaml. + +- `batch_size` (int or str, optional): + - Number of requests to batch together (if the API supports batching). + - Can be an integer or "auto" (which defaults to 1 for API models). + - Default is 1. + +- `seed` (int, optional): + - Random seed for reproducibility. + - Default is 1234. + +- `add_bos_token` (bool, optional): + - Whether to add the beginning-of-sequence token to inputs (when tokenizing). + - Default is False. + +- `custom_prefix_token_id` (int, optional): + - Custom token ID to use as a prefix for inputs. + - If not provided, uses the model's default BOS or EOS token (if `add_bos_token` is True). + + +Example usage: + +```python +class MyAPIModel(TemplateAPI): + def __init__(self, **kwargs): + super().__init__( + model="my-model", + base_url="https://api.mymodel.com/v1/completions", + tokenizer_backend="huggingface", + num_concurrent=5, + max_retries=5, + batch_size=10, + **kwargs + ) + + # Implement other required methods... +``` + +When subclassing `TemplateAPI`, you can override these arguments in your `__init__` method to set default values specific to your API. You can also add additional (potentially user-specified) arguments as needed for your specific implementation. + +## Example Implementation: OpenAI API + +The `OpenAICompletionsAPI` and `OpenAIChatCompletion` ([here](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/models/openai_completions.py) classes demonstrate how to implement API models using the `TemplateAPI` class. Here's a breakdown of the key components: + +### 1. Subclassing and Initialization + +```python +@register_model("openai-completions") +class OpenAICompletionsAPI(LocalCompletionsAPI): + def __init__( + self, + base_url="https://api.openai.com/v1/completions", + tokenizer_backend="tiktoken", + **kwargs, + ): + super().__init__( + base_url=base_url, tokenizer_backend=tokenizer_backend, **kwargs + ) +``` + +### 2. Implementing API Key Retrieval + +```python +@cached_property +def api_key(self): + key = os.environ.get("OPENAI_API_KEY", None) + if key is None: + raise ValueError( + "API key not found. Please set the OPENAI_API_KEY environment variable." + ) + return key +``` + +### 3. Creating the Payload + +```python +def _create_payload( + self, + messages: Union[List[List[int]], List[dict], List[str], str], + generate=False, + gen_kwargs: Optional[dict] = None, + **kwargs, +) -> dict: + if generate: + # ... (implementation for generation) + else: + # ... (implementation for log likelihood) +``` + +### 4. 
Parsing API Responses + +```python +@staticmethod +def parse_logprobs( + outputs: Union[Dict, List[Dict]], + tokens: List[List[int]] = None, + ctxlens: List[int] = None, + **kwargs, +) -> List[Tuple[float, bool]]: + # ... (implementation) + +@staticmethod +def parse_generations(outputs: Union[Dict, List[Dict]], **kwargs) -> List[str]: + # ... (implementation) +``` + +The requests are initiated in the `model_call` or the `amodel_call` methods. + +## Implementing Your Own API Model + +To implement your own API model: + +1. Subclass `TemplateAPI` or one of its subclasses (e.g., `LocalCompletionsAPI`). +2. Override the `__init__` method if you need to set specific parameters. +3. Implement the `_create_payload` and `headers` methods to create the appropriate payload and headers for your API. +4. Implement the `parse_logprobs` and `parse_generations` methods to parse your API's responses. +5. Override the `api_key` property if your API requires authentication. +6. Override any other methods as necessary to match your API's behavior. + +## Best Practices + +1. Use the `@register_model` decorator to register your model with the framework (and import it in `lm_eval/models/__init__.py`!). +2. Use environment variables for sensitive information like API keys. +3. Properly handle batching and concurrent requests if supported by your API. diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md new file mode 100644 index 000000000..48b5c332c --- /dev/null +++ b/docs/CONTRIBUTING.md @@ -0,0 +1,79 @@ +# Contributing to LM Evaluation Harness + +Welcome, and thank you for your interest in the LM Evaluation Harness! We welcome contributions and feedback, appreciate the time you spend with our library, and hope you find it useful! + +## Important Resources + +Information about the LM Evaluation Harness can be found in several places: + +- Our [documentation pages](https://github.com/EleutherAI/lm-evaluation-harness/tree/main/docs) +- We occasionally use [GitHub Milestones](https://github.com/EleutherAI/lm-evaluation-harness/milestones) to track progress toward specific near-term version releases. +- We maintain a [Project Board](https://github.com/orgs/EleutherAI/projects/25) for tracking current work items and PRs, and for future roadmap items or feature requests. +- Further discussion and support conversations are located in the #lm-thunderdome channel of the [EleutherAI discord](https://discord.gg/eleutherai). + +## Code Style + +LM Evaluation Harness uses [ruff](https://github.com/astral-sh/ruff) for linting via [pre-commit](https://pre-commit.com/). + +You can install linters and dev tools via + +```pip install lm_eval[dev]``` or ```pip install -e ".[dev]"``` + +Then, run + +```pre-commit install``` + +in order to ensure linters and other checks will be run upon committing. + +## Testing + +We use [pytest](https://docs.pytest.org/en/latest/) for running unit tests. All library unit tests can be run via: + +``` +python -m pytest --showlocals -s -vv -n=auto --ignore=tests/models/test_neuralmagic.py --ignore=tests/models/test_openvino.py +``` + +## Contributor License Agreement + +We ask that new contributors agree to a Contributor License Agreement affirming that EleutherAI has the rights to use your contribution to our library. +First-time pull requests will have a reply added by @CLAassistant containing instructions for how to confirm this, and we require it before merging your PR.
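+ +Putting the Code Style and Testing sections above together, a typical local check before opening a pull request might look like the sketch below (this assumes an editable install from your own checkout; `pre-commit run --all-files` is pre-commit's standard way to lint the whole tree, and the pytest flags can be trimmed to suit your machine): + +```bash +# install the library together with the dev extras (linters, pytest plugins) +pip install -e ".[dev]" + +# set up the git hook and lint everything once +pre-commit install +pre-commit run --all-files + +# run the unit tests +python -m pytest --showlocals -s -vv -n=auto --ignore=tests/models/test_neuralmagic.py --ignore=tests/models/test_openvino.py +```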
+ + +## Contribution Best Practices + +We recommend a few best practices to make your contributions or reported errors easier to assist with. + +**For Pull Requests:** +- PRs should be titled descriptively, and be opened with a brief description of the scope and intent of the new contribution. +- New features should have appropriate documentation added alongside them. +- Aim for code maintainability, and minimize code copying. +- If opening a task, try to share test results on the task using a publicly-available model, and if any public results are available on the task, compare to them. + +**For Feature Requests:** +- Provide a short paragraph's worth of description. What is the feature you are requesting? What is its motivation, and an example use case of it? How does this differ from what is currently supported? + +**For Bug Reports**: +- Provide a short description of the bug. +- Provide a *reproducible example*--what is the command you run with our library that results in this error? Have you tried any other steps to resolve it? +- Provide a *full error traceback* of the error that occurs, if applicable. A one-line error message or small screenshot snippet is unhelpful without the surrounding context. +- Note what version of the codebase you are using, and any specifics of your environment and setup that may be relevant. + +**For Requesting New Tasks**: +- Provide a 1-2 sentence description of what the task is and what it evaluates. +- Provide a link to the paper introducing the task. +- Provide a link to where the dataset can be found. +- Provide a link to a paper containing results on an open-source model on the task, for use in comparisons and implementation validation. +- If applicable, link to any codebase that has implemented the task (especially the original publication's codebase, if existent). + +## How Can I Get Involved? + +To quickly get started, we maintain a list of good first issues, which can be found [on our project board](https://github.com/orgs/EleutherAI/projects/25/views/8) or by [filtering GH Issues](https://github.com/EleutherAI/lm-evaluation-harness/issues?q=is%3Aopen+label%3A%22good+first+issue%22+label%3A%22help+wanted%22). These are typically smaller code changes or self-contained features which can be added without extensive familiarity with library internals, and we recommend new contributors consider taking a stab at one of these first if they are feeling uncertain where to begin. + +There are a number of distinct ways to contribute to LM Evaluation Harness, and all are extremely helpful! A sampling of ways to contribute include: +- **Implementing and verifying new evaluation tasks**: Is there a task you'd like to see LM Evaluation Harness support? Consider opening an issue requesting it, or helping add it! Verifying and cross-checking task implementations with their original versions is also a very valuable form of assistance in ensuring standardized evaluation. +- **Improving documentation** - Improvements to the documentation, or noting pain points / gaps in documentation, are helpful in order for us to improve the user experience of the library and clarity + coverage of documentation. +- **Testing and devops** - We are very grateful for any assistance in adding tests for the library that can be run for new PRs, and other devops workflows. 
+- **Adding new modeling / inference library integrations** - We hope to support a broad range of commonly-used inference libraries popular among the community, and welcome PRs for new integrations, so long as they are documented properly and maintainable. +- **Proposing or Contributing New Features** - We want LM Evaluation Harness to support a broad range of evaluation usecases. If you have a feature that is not currently supported but desired, feel free to open an issue describing the feature and, if applicable, how you intend to implement it. We would be happy to give feedback on the cleanest way to implement new functionalities and are happy to coordinate with interested contributors via GH discussions or via discord. + +We hope that this has been helpful, and appreciate your interest in contributing! Further questions can be directed to [our Discord](discord.gg/eleutherai). diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 000000000..f040eabae --- /dev/null +++ b/docs/README.md @@ -0,0 +1,11 @@ +# Eval Harness Documentation + +Welcome to the docs for the LM Evaluation Harness! + +## Table of Contents + +* To learn about the public interface of the library, as well as how to evaluate via the command line or as integrated into an external library, see the [Interface](./interface.md). +* To learn how to add a new library, API, or model type to the library, as well as a quick explainer on the types of ways to evaluate an LM, see the [Model Guide](./model_guide.md). + * For an extended description of how to extend the library to new model classes served over an API, see the [API Guide](./API_guide.md). +* For a crash course on adding new tasks to the library, see our [New Task Guide](./new_task_guide.md). +* To learn more about pushing the limits of task configuration that the Eval Harness supports, see the [Task Configuration Guide](./task_guide.md). diff --git a/docs/decontamination.md b/docs/decontamination.md new file mode 100644 index 000000000..cdda0e218 --- /dev/null +++ b/docs/decontamination.md @@ -0,0 +1,71 @@ +# Decontamination + +## Usage + +The provided directory should contain +the ngram files and info.json produced in "Pile Ngram Generation" further down. + +```bash +python -m lm_eval \ + --model gpt2 \ + --device 0 \ + --tasks sciq +``` + +## Background +Downstream evaluations test model generalization, and are less useful when test set data also exists in the training set, referred to as leakage or contamination. + +Filtering your training set against the test set is a good first step, however this isn't always possible, as in the case of a new benchmark or one that wasn't considered prior to model training. When training set filtering isn't possible, it is useful to measure the impact of test set leakage by detecting the contaminated test examples and producing a clean version of the benchmark. + +The basis for our decontamination procedure can be found in Appendix C of "Language Models are Few-Shot Learners". OpenAI defined a test document as contaminated if any N-gram overlap existed with any training document. They used a range of N values between 8 and 13 depending on dataset, while we just used 13 for simplicity. + +## Implementation +Contamination detection can be found in `lm_eval/decontaminate.py` with supporting code in `lm_eval/decontamination/`. + +decontaminate.py does the following: +1. Build dictionaries of all ngrams and their corresponding evaluation/document ids. +2. Scan through sorted files containing training set n-grams. +3. 
If a match is found, the corresponding evaluation/document combinations are marked as contaminated. + +`lm_eval/evaluator.py` can then produce a clean version of the benchmark by excluding the results of contaminated documents. For each metric, a clean version will be shown in the results with a "decontaminate" suffix. + +This is disabled by default for new tasks, to support decontamination on a task override the "should_decontaminate" and "doc_to_decontamination_query" methods. For more details see the [task guide](task_guide.md). + +## Pile Ngram Generation +The relevant scripts can be found in `scripts/clean_training_data`, which also import from +`lm_eval/decontamination/` + +1. git clone https://github.com/EleutherAI/lm-evaluation-harness.git +2. pip install -r requirements.txt +3. Download The Pile from [The Eye](https://the-eye.eu/public/AI/pile/train/) +4. Place pile files in "pile" directory under "lm-evaluation-harness" (or create a symlink) +5. Run generate_13_grams. + +```bash +export PYTHONHASHSEED=0 +python -m scripts/clean_training_data/generate_13_grams \ + -dir path/to/working/directory \ + -n 13 \ + -buckets 500 +``` + +Took approximately 4 days for us. We had the time to wait, but this could be scaled out by doing partial pile scans on multiple instances of this script and merging the relevant buckets. We fixed PYTHONHASHSEED to ensure reproducibility of bucket hashing in case you need to stop and start. + +6. Sort the generated 13-grams. +```bash +python -m scripts/clean_training_data/sort_13_gram_buckets \ + -dir path/to/working/directory/output +``` + +Took approximately 5 days for us. You could speed this up by spreading the files around to different machines and running the sort script before gathering them together. + +7. Compress the sorted 13 grams files and place them together with info.json. + +This step only takes a few hours. + +```bash +python -m scripts/clean_training_data/compress_and_package \ + -dir path/to/working/directory \ + -output path/to/final/directory \ + -procs 8 +``` diff --git a/docs/en/.readthedocs.yaml b/docs/en/.readthedocs.yaml deleted file mode 100644 index c2343e261..000000000 --- a/docs/en/.readthedocs.yaml +++ /dev/null @@ -1,17 +0,0 @@ -version: 2 - -# Set the version of Python and other tools you might need -build: - os: ubuntu-20.04 - tools: - python: "3.10" - -formats: - - epub - -sphinx: - configuration: docs/en/source/conf.py - -python: - install: - - requirements: requirements/docs.txt \ No newline at end of file diff --git a/docs/en/Makefile b/docs/en/Makefile deleted file mode 100644 index d0c3cbf10..000000000 --- a/docs/en/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from the environment for the first two. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = source -BUILDDIR = build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
-%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/en/make.bat b/docs/en/make.bat deleted file mode 100644 index 747ffb7b3..000000000 --- a/docs/en/make.bat +++ /dev/null @@ -1,35 +0,0 @@ -@ECHO OFF - -pushd %~dp0 - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set SOURCEDIR=source -set BUILDDIR=build - -%SPHINXBUILD% >NUL 2>NUL -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.https://www.sphinx-doc.org/ - exit /b 1 -) - -if "%1" == "" goto help - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% - -:end -popd diff --git a/docs/en/source/advanced/VLM_quant&img-txt_dataset.md b/docs/en/source/advanced/VLM_quant&img-txt_dataset.md deleted file mode 100644 index 28490a50b..000000000 --- a/docs/en/source/advanced/VLM_quant&img-txt_dataset.md +++ /dev/null @@ -1,77 +0,0 @@ -# VLM Quantization and custom_mm Datasets - -llmc currently supports calibrating and quantizing VLM models using custom_mm datasets. - -## VLM Quantization -The currently supported models are: -1. llava -2. intervl2 -3. llama3.2 -4. qwen2vl - -More VLM models are under development. - -Here is an example configuration. You can refer to the [Calibration Dataset Template](https://github.com/user-attachments/files/18433608/general_custom_data_examples.zip) on GitHub.: - -```yaml -model: - type: Llava - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: custom_mm - download: False - path: calib data path - apply_chat_template: True - add_answer: True # Defalut is False. If set it to Ture, calib data will add answers. - n_samples: 8 - bs: -1 - seq_len: 512 - padding: True -``` - -## custom_mm datatsets -The format of the custom_mm dataset is as follows: -``` -img_txt-datasets/ -├── images/ -│ ├── image1.jpg -│ ├── image2.jpg -│ ├── image3.jpg -│ └── ... (other images) -└── img_qa.json -``` - -Example format of img_qa.json: -```json -[ - { - "image": "images/0a3035bfca2ab920.jpg", - "question": "Is this an image of Ortigia? Please answer yes or no.", - "answer": "Yes" - }, - { - "image": "images/0a3035bfca2ab920.jpg", - "question": "Is this an image of Montmayeur castle? Please answer yes or no.", - "answer": "No" - }, - { - "image": "images/0ab2ed007db301d5.jpg", - "question": "Is this a picture of Highgate Cemetery? Please answer yes or no.", - "answer": "Yes" - } -] -``` -The "answer" field is optional. -The custom_mm dataset can include calibration data that contains only text (except for llama3.2). - -## VLM Evaluation - -LLMC integrates [lmms-eval](https://github.com/EvolvingLMMs-Lab/lmms-eval) for evaluations on various downstream datasets. In the config's eval section, the type should be specified as "vqa", and the downstream evaluation datasets in the name should follow the standards set by lmms-eval. 
- -``` -eval: - type: vqa - name: [mme] # vqav2, gqa, vizwiz_vqa, scienceqa, textvqa -``` diff --git a/docs/en/source/advanced/Vit_quant&img_dataset.md b/docs/en/source/advanced/Vit_quant&img_dataset.md deleted file mode 100644 index 168e134fd..000000000 --- a/docs/en/source/advanced/Vit_quant&img_dataset.md +++ /dev/null @@ -1,46 +0,0 @@ -# Vit quant and img datatsets - -llmc currently supports the use of image datasets for calibration and quantification of Vit models - -## Vit quant - -Here is an example configuration: - -```yaml -model: - type: Vit - path: /models/vit-base-patch16-224 - torch_dtype: auto -calib: - name: imagenet - type: img - download: False - path: img calib datasets path - n_samples: 32 - bs: 1 - seq_len: 512 # Useless arguments for vit - preproc: img_general - seed: *seed -eval: - eval_pos: [pretrain, fake_quant] - name: imagenet - type: acc # acc: accracy - download: False - path: img datasets path - seq_len: 2048 # Useless arguments for vit - bs: 1 - inference_per_block: False - eval_token_consist: False -``` - -## img datatsets -IMG dataset format requirements: There are images in the IMG dataset directory - -The format of the img dataset is as follows: -``` -images/ -├── image1.jpg -├── image2.jpg -├── image3.jpg -└── ... (other images) -``` diff --git a/docs/en/source/advanced/custom_dataset.md b/docs/en/source/advanced/custom_dataset.md deleted file mode 100644 index f715e22dd..000000000 --- a/docs/en/source/advanced/custom_dataset.md +++ /dev/null @@ -1,31 +0,0 @@ -# Custom calibration datasets - -Llmc currently supports the following types of calibration datasets. - -1. pileval - -2. wikitext2 - -3. c4 - -4. ptb - -5. custom - -where custom means that a user-defined calibration dataset is used. For some proprietary models in specific scenarios, it is more appropriate to use the data from that scenario for the calibration data when quantizing. Here's an example of a configuration. - - -``` -calib: - name: custom - download: False - load_from_txt: True - path: # Custom dataset, ending with txt as suffix - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: random_truncate_txt - seed: *seed -``` - -Users can write a piece of data text to a txt file, each line represents a piece of text data, using the above configuration, you can achieve the calibration of custom data sets. diff --git a/docs/en/source/advanced/mix_bits.md b/docs/en/source/advanced/mix_bits.md deleted file mode 100644 index fa5537bc5..000000000 --- a/docs/en/source/advanced/mix_bits.md +++ /dev/null @@ -1,125 +0,0 @@ -# Layerwise mixed bits quantization - -llmc currently supports layerwise mixed bit quantization, which can achieve any degree of mixing. - -Here are some sample settings: - -1. The model as a whole implements 4-bit weight-only quantification, and all down_proj implements 8-bit weight-only quantification. - -``` -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - mix_bits: - setting_0: - layer_name: [down_proj] - do_quant: True - weight: - bit: 8 - symmetric: False - granularity: per_group - group_size: 128 -``` - -2. The model as a whole implements 4-bit weight-only quantization, 8-bit weight-only quantification is implemented for down_proj in the 0, 1, 2, 3, 28, 29, 30, and 31 blocks, and all o_proj are not quantified. 
- -``` -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - mix_bits: - setting_0: - layer_name: [down_proj#0-1-2-3-28-29-30-31] - do_quant: True - weight: - bit: 8 - symmetric: False - granularity: per_group - group_size: 128 - setting_1: - layer_name: [o_proj] - do_quant: False -``` - -3. The model as a whole implements W4A4 quantification, and all down_proj implements W8A8 quantification. - -``` -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_channel - act: - bit: 4 - symmetric: False - granularity: per_token - mix_bits: - setting_0: - layer_name: [down_proj] - do_quant: True - weight: - bit: 8 - symmetric: False - granularity: per_channel - act: - bit: 8 - symmetric: False - granularity: per_token -``` - -4. A mixing enough config that it may not make practical sense. - -``` -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_channel - act: - bit: 4 - symmetric: False - granularity: per_token - mix_bits: - setting_0: - layer_name: [down_proj#0-1-8-15] - do_quant: True - weight: - bit: 8 - symmetric: False - granularity: per_channel - act: - bit: 8 - symmetric: False - granularity: per_token - setting_1: - layer_name: [down_proj#2-6-4-11, o_proj#2-7] - do_quant: False - setting_2: - layer_name: [down_proj#27] - do_quant: True - weight: - bit: 6 - symmetric: False - granularity: per_channel - act: - bit: 6 - symmetric: False - granularity: per_token - setting_3: - layer_name: [down_proj#13-21] - do_quant: True - weight: - bit: 4 - symmetric: False - granularity: per_channel -``` diff --git a/docs/en/source/advanced/model_test_v1.md b/docs/en/source/advanced/model_test_v1.md deleted file mode 100644 index 273380019..000000000 --- a/docs/en/source/advanced/model_test_v1.md +++ /dev/null @@ -1,208 +0,0 @@ -# Model accuracy test V1 - -## Accuracy test pipeline - -LLMC supports basic PPL (Perplexity) evaluation, but more downstream task evaluations are not supported by LLMC itself. - -It is common practice to use evaluation tools to directly test the inference of the model, including but not limited to: - -1. [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness) - -2. [opencompass](https://github.com/open-compass/opencompass) - -However, this evaluation method is not efficient, so we recommend using the inference engine evaluation tool to separate the model accuracy evaluation, the model is inferred by the inference engine, and served in the form of an API, and the evaluation tool evaluates the API. This approach has the following benefits: - - -1. Using an efficient inference engine for model inference can speed up the entire evaluation process - -2. The reasoning of the model and the evaluation of the model are separated, and each is responsible for its own professional affairs, and the code structure is clearer - -3. Using the inference engine to infer a model is more in line with the actual deployment scenario and easier to align with the accuracy of the actual deployment of the model - -We recommend and introduce the compression-deployment-evaluation process using the following model: **LLMC compression-lightllm inference-opencompass evaluation** - - -Here are the links to the relevant tools: - -1. llmc, Large language Model Compression Tool, [(GitHub)(https://github.com/ModelTC/llmc), [Doc](https://llmc-zhcn.readthedocs.io/en/latest/)] - -2. Lightllm, Large language Model Inference Engine, [[GitHub](https://github.com/ModelTC/lightllm)] - -3. 
OpenCompass, Large language Model Evaluation Tool, [[GitHub]((https://github.com/open-compass/opencompass)), [Doc](https://opencompass.readthedocs.io/zh-cn/latest/)] - -## Use of the lightLLM inference engine - -The official [lightllm](https://github.com/ModelTC/llmc) repository has more detailed documentation, but here is a simple and quick start - - start a service of a float model - -**install lightllm** - -``` -git clone https://github.com/ModelTC/lightllm.git -cd lightllm -pip install -v -e . -``` - -**start a service** - -``` -python -m lightllm.server.api_server --model_dir # model path \ - --host 0.0.0.0 \ - --port 1030 \ - --nccl_port 2066 \ - --max_req_input_len 6144 \ - --max_req_total_len 8192 \ - --tp 2 \ - --trust_remote_code \ - --max_total_token_num 120000 -``` - -The above command will serve a 2-card on port 1030 of the machine - -The above commands can be set by the number of tp, and TensorParallel inference can be performed on tp cards, which is suitable for inference of larger models. - -The max_total_token_num in the above command will affect the throughput performance during the test, and can be set according to the lightllm [documentation](https://github.com/ModelTC/lightllm/blob/main/docs/ApiServerArgs.md). As long as the gpu memory is not exploded, the larger the setting, the better. - -If you want to set up multiple lightllm services on the same machine, you need to reset the port and nccl_port above without conflicts. - - Simple testing of the service - -Execute the following python script - -``` -import requests -import json - -url = 'http://localhost:1030/generate' -headers = {'Content-Type': 'application/json'} -data = { - 'inputs': 'What is AI?', - "parameters": { - 'do_sample': False, - 'ignore_eos': False, - 'max_new_tokens': 128, - } -} -response = requests.post(url, headers=headers, data=json.dumps(data)) -if response.status_code == 200: - print(response.json()) -else: - print('Error:', response.status_code, response.text) -``` - -If the above script returns normally, the service is normal - - start a service of a quantization model - -``` -python -m lightllm.server.api_server --model_dir 模型路径 \ - --host 0.0.0.0 \ - --port 1030 \ - --nccl_port 2066 \ - --max_req_input_len 6144 \ - --max_req_total_len 8192 \ - --tp 2 \ - --trust_remote_code \ - --max_total_token_num 120000 \ - --mode triton_w4a16 -``` - -Added to the command `--mode triton_w4a16`, indicates that the naive quantization of w4a16 was used - -After the service is started, you also need to verify whether the service is normal - -The model path used by the above command is the original pre-trained model and has not been adjusted by the llmc. You can follow the LLMC documentation, open the save_trans, save a modified model, and then run the naive quantization service command described above. - -## Use of the opencompass evaluation tool - -The official [opencompass](https://github.com/open-compass/opencompass) repository has more detailed documentation, but here is a simple and quick start - -**install opencompass** - -``` -git clone https://github.com/open-compass/opencompass.git -cd opencompass -pip install -v -e . 
-``` - -**Modify the config** - -The config file is [here](https://github.com/open-compass/opencompass/blob/main/configs/eval_lightllm.py), this configuration file is used by OpenCompass to evaluate the accuracy of Lightllm's API service, and it should be noted that the port inside it url should be consistent with the above Lightllm service port - - -For the selection of the evaluation dataset, you need to modify this part of the code - -``` -with read_base(): - from .summarizers.leaderboard import summarizer - from .datasets.humaneval.deprecated_humaneval_gen_a82cae import humaneval_datasets -``` - -The above code snippet, which represents the test humaneval dataset, can be found here for more dataset testing support - -**Dataset download** - -It is necessary to prepare the dataset according to the OpenCompass [documentation](https://opencompass.readthedocs.io/en/latest/get_started/installation.html#dataset-preparation). - -**Run accuracy tests** - -After modifying the above configuration file, you can run the following command -``` -python run.py configs/eval_lightllm.py -``` -When the model has completed the inference and metric calculations, we can get the evaluation results of the model. The output folder will be generated in the current directory, the logs subfolder will record the logs in the evaluation, and the summary subfile will record the accuracy of the measured data set - -## Use of the lm-evaluation-harness evaluation tool - -Besides the above-mentioned methods, we also recommend people use [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness). We have already integrated this tool in ours. After cloning the submodules of our llmc, people can refer to the following commands to evaluate the quantized model/full precision model: - -``` -export CUDA_VISIBLE_DEVICES=4,5,6,7 -llmc=./llmc -lm_eval=./llmc/lm-evaluation-harness -export PYTHONPATH=$llmc:$PYTHONPATH -export PYTHONPATH=$llmc:$lm_eval:$PYTHONPATH -# Replace the config file (i.e., RTN with algorithm-transformed model path or notate quant with original model path) -# with the one you want to use. `--quarot` depends on the transformation algorithm used before. -accelerate launch --multi_gpu --num_processes 4 llmc/tools/llm_eval.py \ - --config llmc/configs/quantization/RTN/rtn_quarot.yml \ - --model hf \ - --quarot \ - --tasks lambada_openai,arc_easy \ - --model_args parallelize=False \ - --batch_size 64 \ - --output_path ./save/lm_eval \ - --log_samples -``` - -We preserve the command in [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness). There are only two more arguments ``--config`` and ``--quarot``. The former is for loading the transformed model (saved by ``save_trans``) or the original hugginface model, depending on the model path. Otherwise, remove ``quant`` part in the config to perform evaluation for the full-precision model, and we only support RTN quant, where all related quantization granularities need to align with the setting of the transformed model. The latter is employed if the model is transformed by [QuaRot](https://arxiv.org/abs/2404.00456). - -*Remark: Please cancel the paralleize (or paralleize=False) and pretrained=\* in ``--model_args`` for evaluation.* - -## FAQ - -** Q1 ** - -What does the dataset configuration file in OpenCompass mean when the same dataset has different suffixes? 
- -** Solution ** - -Different suffixes represent different prompt templates, and for detailed OpenCompass questions, please refer to the OpenCompass documentation - -** Q2 ** - -The test accuracy of the Humaneval of the LLAMA model is too low - -** Solution ** - -You may need to delete the \n at the end of each entry in the Humaneval json file in the dataset provided by OpenCompass and retest it - -** Q3 ** - -The test is still not fast enough - -** Solution ** - -You can consider whether the max_total_token_num parameter settings are reasonable when starting the lightllm service, and if the setting is too small, the test concurrency will be low - diff --git a/docs/en/source/advanced/model_test_v2.md b/docs/en/source/advanced/model_test_v2.md deleted file mode 100644 index 39ee8a40b..000000000 --- a/docs/en/source/advanced/model_test_v2.md +++ /dev/null @@ -1,103 +0,0 @@ -# Model accuracy test V2 - -In the accuracy testing of Model accuracy test V1, the process was not streamlined enough. We listened to feedback from the community developers and developed Model Accuracy Test V2. - -In the V2 version, we no longer need to use an inference engine to start a service, nor do we need to break the testing into multiple steps. - -Our goal is to make downstream accuracy testing equivalent to PPL testing. Running a program from llmc will, after completing the algorithm execution, directly conduct PPL testing and simultaneously perform the corresponding downstream accuracy testing. - -To achieve the above goals, we only need to add an opencompass setting in the existing configuration. - - -``` -base: - seed: &seed 42 -model: - type: Llama - path: model path - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: pileval_awq - seed: *seed -eval: - eval_pos: [pretrain, fake_quant] - name: wikitext2 - download: False - path: eval data path - bs: 1 - seq_len: 2048 -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - special: - weight_clip: False -save: - save_trans: True - save_path: ./save -opencompass: - cfg_path: opencompass config path - output_path: ./oc_output -``` - - The cfg_path in opencompass needs to point to a configuration path for opencompass. - -[Here](https://github.com/ModelTC/llmc/tree/main/configs/opencompass), we have provided the configurations for both the base model and the chat model regarding the human-eval test as a reference for everyone. - -It is important to note that [the configuration provided by opencompass](https://github.com/ModelTC/opencompass/blob/opencompass-llmc/configs/models/hf_llama/hf_llama3_8b.py) needs to have the path key. However, in this case, we do not need this key because llmc will default to using the model path in the save path of trans - -Of course, since the save path of trans model is required, you need to set save_trans to True if you want to test in opencompass. - - The max_num_workers in opencompass refers to the maximum number of inference instances. - -If the model is running on a single GPU, then max_num_workers refers to the number of inference instances to be started, meaning it will occupy max_num_workers number of GPUs. 
- -If the model is running on multiple GPUs, as in the case of multi-GPU parallel testing (as mentioned below), for example, if the model is running inference on 2 GPUs, then max_num_workers refers to the number of inference instances to be started, meaning it will occupy 2 * max_num_workers number of GPUs. - -In summary, the required number of GPUs = number of PP (pipeline parallelism) * max_num_workers. - -If the required number of GPUs exceeds the actual number of available GPUs, then some workers will have to wait in a queue. - -max_num_workers not only starts multiple inference instances but also splits each dataset into max_num_workers parts, which can be understood as data parallelism. - -Therefore, the optimal setting is to make the required number of GPUs equal to the number of available GPUs. - -For example: - -On a machine with 8 GPUs, if a model runs on a single GPU, then max_num_workers=8. -On a machine with 8 GPUs, if a model runs on 4 GPUs, then max_num_workers=2. -We should try to lower the number of PPs while increasing max_num_workers, because PP parallelism tends to be slower. PP should only be used when the model cannot run on a single GPU, such as for a 70B model that cannot run on a single GPU. In this case, we can set PP=4 and use four 80GB GPUs to run it. - - The output_path in opencompass is used to set the output directory for the evaluation logs of opencompass. - -In this log directory, OpenCompass will output logs for inference and evaluation, detailed inference results, and the final evaluation accuracy. - -Before running the llmc program, you also need to install the version of [opencompass](https://github.com/ModelTC/opencompass/tree/opencompass-llmc) that has been adapted for llmc. - -``` -git clone https://github.com/ModelTC/opencompass.git -b opencompass-llmc -cd opencompass -pip install -v -e . -pip install human-eval -``` - -According to the opencompass [documentation](https://opencompass.readthedocs.io/en/latest/get_started/installation.html#dataset-preparation), prepare the dataset and place it in the current directory where you execute the command. - -Finally, you can load the above configuration and perform model compression and accuracy testing just like running a regular llmc program. - -## Multi-GPU parallel test - -If the model is too large to fit on a single GPU for evaluation, and multi-GPU evaluation is needed, we support using pipeline parallelism when running opencompass. - -What you need to do is: -1. Identify which GPUs are available, add them to CUDA_VISIBLE_DEVICES at the beginning of your run script -2. Modify the file pointed to by cfg_path under opencompass, setting the num_gpus to the desired number. diff --git a/docs/en/source/advanced/sparsification.md b/docs/en/source/advanced/sparsification.md deleted file mode 100644 index 88b26f08a..000000000 --- a/docs/en/source/advanced/sparsification.md +++ /dev/null @@ -1,181 +0,0 @@ -# Model Sparsification - -The llmc is currently gradually supporting sparse methods, having already implemented Magnitude, Wanda, and ShortGPT, and will support more algorithms in the future. - -Here is a sample of Wanda's settings: - - - -``` -base: - seed: &seed 42 -model: - type: Qwen2 # Set the model name, which can support Llama, Qwen2, Llava, Gemma2 and other models. - path: # Set model weight path. - torch_dtype: auto -calib: - name: pileval - download: False - path: # Set calibration dataset path. 
- n_samples: 512 - bs: 1 - seq_len: 512 - preproc: pileval_smooth - seed: *seed -eval: - eval_pos: [pretrain, transformed] # In the process of unstructured sparsification, the corresponding position weight is reset to 0 directly, and the sparse model can be obtained directly after transformed, without additional deployment stage - name: wikitext2 - download: False - path: # Set eval dataset path. - bs: 1 - seq_len: 2048 -sparse: - method: Wanda - weight: - sparsity: 0.5 # Set model sparsity - sparsity_out: False # Set whether use the output of the sparse layer as the input of the next layer. -save: - save_trans: True # Set to True to save the adjusted weights. - save_path: ./save -``` - -Here are some of the results of using Wanda: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
PPL on c4 / wikitext2 at sparsity ratios 0 (dense), 0.25, 0.5, and 0.75:

| Model | dense c4 | dense wikitext2 | 0.25 c4 | 0.25 wikitext2 | 0.5 c4 | 0.5 wikitext2 | 0.75 c4 | 0.75 wikitext2 |
|-------|----------|-----------------|---------|----------------|--------|---------------|---------|----------------|
| LLaMa2-7B | 7.26 | 5.47 | 7.46 | 5.61 | 9.25 | 6.85 | 260.42 | 259.91 |
| LLaMa2-70B | 5.71 | 3.32 | 5.76 | 3.4 | 6.49 | 4.17 | 32.5 | 21.66 |
| LLaMa3-8B | 9.44 | 6.13 | 10.01 | 6.47 | 15.07 | 9.68 | 336.62 | 290.38 |
| LLaMa3-70B | 7.16 | 2.85 | 7.44 | 3.22 | 9.96 | 5.81 | 93.99 | 74.78 |

The results compared to the original [Wanda](https://github.com/locuslab/wanda) repository are shown below. In this experimental setup, the hyperparameters, calibration datasets, and the data preprocessing and evaluation methods are aligned with Wanda.

| Model | Wanda | LLMC |
|-------|-------|------|
| LLaMa2-7b | 6.91 | 6.91 |
| LLaMa2-70b | 4.22 | 4.19 |
| LLaMa3-8b | 9.56 | 9.58 |
| LLaMa3-70b | OOM | 5.75 |
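The unstructured sparsification evaluated above zeroes individual weights in place using the Wanda importance score (the weight magnitude multiplied by the input-activation norm). The sketch below is a rough, self-contained illustration of that idea; the function name, tensor shapes, and row-wise 50% pruning are assumptions made for the example, not LLMC's actual implementation.

```python
import torch

def wanda_prune_linear(weight: torch.Tensor, calib_acts: torch.Tensor, sparsity: float = 0.5) -> torch.Tensor:
    """Zero the lowest-scoring weights of one linear layer, Wanda-style.

    weight:     (out_features, in_features) weight matrix.
    calib_acts: (n_tokens, in_features) calibration activations feeding this layer.
    sparsity:   fraction of weights to zero within each output row.
    """
    # Wanda importance score: |W_ij| * ||X_j||_2, using the per-input-channel activation norm.
    act_norm = calib_acts.norm(p=2, dim=0)                 # (in_features,)
    score = weight.abs() * act_norm.unsqueeze(0)           # (out_features, in_features)

    # Within each output row, zero the `sparsity` fraction of weights with the lowest scores.
    n_prune = int(weight.shape[1] * sparsity)
    pruned = weight.clone()
    if n_prune > 0:
        lowest = torch.argsort(score, dim=1)[:, :n_prune]  # lowest-scoring input channels per row
        pruned.scatter_(1, lowest, 0.0)
    return pruned

# Toy usage: 50% unstructured sparsity on a random layer.
w = torch.randn(8, 16)
x = torch.randn(64, 16)
w_sparse = wanda_prune_linear(w, x, sparsity=0.5)
print((w_sparse == 0).float().mean())  # ~0.5
```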
- - diff --git a/docs/en/source/advanced/token_reduction.md b/docs/en/source/advanced/token_reduction.md deleted file mode 100644 index 68825284f..000000000 --- a/docs/en/source/advanced/token_reduction.md +++ /dev/null @@ -1,72 +0,0 @@ - - -# Token Reduction - -LightCompress currently supports token reduction for mainstream multimodal large language models. Configuration is very simple—plug and play. - -Here is an example configuration - -```yaml -base: - seed: &seed 42 -model: - type: Llava - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, transformed] - type: vqa - name: [gqa, mmbench_en_dev, mme] - bs: 1 - inference_per_block: False -sparse: - method: TokenReduction - special: - method: FastV - pruning_loc: 3 - rate: 0.778 -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ -``` - -The configuration file contains three core sections, including: - -1. **`model`** - For model selection, you can choose LLaVA, LLaVA-NeXT, Qwen2.5VL, and LLaVA OneVision, etc. These models cover both image and video tasks. For the detailed list of supported models, see the file. LightCompress will support more models in the future. - -2. **`eval`** - For the `eval_pos` parameter: - - `pretrain` denotes the original model that keeps all visual tokens. - - `transformed` denotes the model with token reduction applied. - LightCompress integrates lmms-eval to evaluate various downstream datasets. Set `type` to `vqa`, and specify the datasets in `name` following the naming conventions in the lmms-eval documentation. - -3. **`sparse`** - Set `method` to `TokenReduction` first, and then specify the concrete algorithm and related hyperparameters under `special`. Since each algorithm has different hyperparameters, refer to the configuration files for details. - -## Combining Quantization - -LightCompress also supports an extreme compression scheme that combines token reduction with quantization. First, choose a quantization algorithm to save a `fake_qunat` model (see the quantization section of the docs). Then load this model and add the `token_reduction` field under `quant`. - -```yaml -quant: - method: RTN - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - special: - actorder: True - static_groups: True - percdamp: 0.01 - blocksize: 128 - true_sequential: True - quant_out: True - token_reduction: - method: FastV - special: - pruning_loc: 3 - rate: 0.778 -``` \ No newline at end of file diff --git a/docs/en/source/backend/autoawq.md b/docs/en/source/backend/autoawq.md deleted file mode 100644 index 18c65a778..000000000 --- a/docs/en/source/backend/autoawq.md +++ /dev/null @@ -1,112 +0,0 @@ - -# AutoAWQ Quantized Inference - -[AutoAWQ](https://github.com/casper-hansen/AutoAWQ) is an easy-to-use package for 4-bit weight quantization models. Compared to FP16, **AutoAWQ** can speed up models by 3 times and reduce memory requirements by 3 times. **AutoAWQ** implements the Activation-aware Weight Quantization (AWQ) algorithm for quantizing large language models. - -**LLMC** supports exporting the quantization format required by **AutoAWQ** and is compatible with various algorithms, not limited to AWQ. In contrast, **AutoAWQ** only supports the AWQ algorithm, while **LLMC** can export real quantized models via algorithms like GPTQ, AWQ, and Quarot for **AutoAWQ** to load directly and use **AutoAWQ**'s GEMM and GEMV kernels to achieve inference acceleration. 
- - -## 1.1 Environment Setup - -To perform quantized inference using **AutoAWQ**, first, you need to install and configure the **AutoAWQ** environment: -```bash -INSTALL_KERNELS=1 pip install git+https://github.com/casper-hansen/AutoAWQ.git -# NOTE: This installs https://github.com/casper-hansen/AutoAWQ_kernels -``` - -## 1.2 Quantization Formats - -In **AutoAWQ**'s fixed-point integer quantization, the following common formats are supported: - -- **W4A16**: weights are int4, activations are float16; -- **Weight per-channel/group quantization**: quantization is performed per-channel or per-group; -- **Weight asymmetric quantization**: quantization parameters include scale and zero point; - -Therefore, when quantizing models using **LLMC**, make sure that the bit-width for weights and activations is set to a format supported by **AutoAWQ**. - - -## 1.3 Using LLMC for Model Quantization - -### 1.3.1 Calibration Data - -In this chapter, we use the **Pileval** and **Wikitext** academic datasets as calibration data. For downloading and preprocessing calibration data, refer to [this chapter](https://llmc-en.readthedocs.io/en/latest/configs.html). - -In practical use, we recommend using real deployment data for offline quantization calibration. - - -### 1.3.2 Choosing a Quantization Algorithm - - -**W4A16** - -Under the W4A16 quantization setting, we recommend using the AWQ algorithm in LLMC. - -You can refer to the [AWQ W4A16 weight quantization configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/autoawq/awq_w4a16.yml) for the specific implementation. - -```yaml -# configs/quantization/backend/autoawq/awq_w4a16.yml -quant: - method: Awq - weight: - bit: 4 - symmetric: True - granularity: per_group - group_size: 128 - pack_version: gemm_pack - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -``` - -Please note that in this step, the `pack_version` parameter needs to be set to either `gemm_pack` or `gemv_pack`, which correspond to two ways of packing int4 data into `torch.int32`, suitable for loading `GEMM` and `GEMV` kernels in **AutoAWQ** for different needs. For the difference between `GEMM` and `GEMV`, please refer to this [link](https://github.com/casper-hansen/AutoAWQ/tree/main?tab=readme-ov-file#int4-gemm-vs-int4-gemv-vs-fp16). - -Additionally, if AWQ does not meet the precision requirements, other algorithms such as [GPTQ](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/autoawq/gptq_w4a16.yml) can also be tried. We also recommend the **AWQ+OmniQuant combination algorithm** introduced in this [section](https://llmc-en.readthedocs.io/en/latest/practice/awq_omni.html) to further improve accuracy. Corresponding [configuration files](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/autoawq/w4a16_combin) are provided for reference. - - -### 1.3.3 Exporting Real Quantized Model - -```yaml -save: - save_autoawq: True - save_path: /path/to/save_for_autoawq_awq_w4/ -``` -Make sure to set `save_autoawq` to `True`. For the W4A16 quantization setting, LLMC will export the weights packed into `torch.int32` format for **AutoAWQ** to load directly, along with exporting the quantization parameters. 
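To make the packed `torch.int32` export above a bit more concrete, the sketch below shows one simple way to pack eight unsigned 4-bit values into a single int32 element. The real AutoAWQ GEMM/GEMV layouts additionally reorder and interleave columns for their kernels, so treat this purely as an illustration of the storage idea; the function name and the nibble/byte order are assumptions for the example, not the actual packing routine.

```python
import torch

def pack_int4_to_int32(q: torch.Tensor) -> torch.Tensor:
    """Pack unsigned 4-bit values (0..15) along the last dim, eight per int32 element."""
    assert q.shape[-1] % 8 == 0, "last dim must be a multiple of 8"
    q = (q.to(torch.uint8) & 0xF).reshape(*q.shape[:-1], -1, 2)
    bytes_ = q[..., 0] | (q[..., 1] << 4)             # two nibbles per byte
    return bytes_.contiguous().view(torch.int32)      # four bytes per int32 (little-endian)

qweight = torch.randint(0, 16, (2, 16))               # toy 4-bit quantized weights
packed = pack_int4_to_int32(qweight)
print(qweight.shape, "->", packed.shape, packed.dtype)  # (2, 16) -> (2, 2), torch.int32
```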
- - -### 1.3.4 Running LLMC - -Modify the configuration file path in the run script and execute: - -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=awq_for_autoawq -config=${llmc}/configs/quantization/backend/autoawq/awq_w4a16.yml -``` -Once LLMC finishes running, the real quantized model will be stored in the `save.save_path`. - - -## 1.4 Using AutoAWQ for Inference - - -### 1.4.1 Offline Inference - -We provide an [example](https://github.com/ModelTC/llmc/blob/main/examples/backend/autoawq/infer_with_autoawq.py) of using **AutoAWQ** for offline inference. - -First, clone the **AutoAWQ** repository locally: - -```bash -git clone https://github.com/casper-hansen/AutoAWQ.git -``` - -Next, replace `autoawq_path` in the [example](https://github.com/ModelTC/llmc/blob/main/examples/backend/autoawq/infer_with_autoawq.py) with the local path to your **AutoAWQ** repository, and replace `model_path` in the [example](https://github.com/ModelTC/llmc/blob/main/examples/backend/autoawq/infer_with_autoawq.py) with the path where the model is saved in `save.save_path`. Then run the following command to complete inference: - -```bash -cd examples/backend/autoawq - -CUDA_VISIBLE_DEVICES=0 python infer_with_autoawq.py -``` \ No newline at end of file diff --git a/docs/en/source/backend/lightx2v.md b/docs/en/source/backend/lightx2v.md deleted file mode 100755 index e046f9846..000000000 --- a/docs/en/source/backend/lightx2v.md +++ /dev/null @@ -1,177 +0,0 @@ -# lightx2v Quantized Inference - -[lightx2v](https://github.com/ModelTC/lightx2v) is an efficient backend designed specifically to meet the inference demands of video generation models. By optimizing memory management and computational efficiency, it significantly accelerates the inference process. - -**LLMC** supports exporting quantized model formats required by **lightx2v** and offers strong support for multiple quantization algorithms (such as AWQ, GPTQ, SmoothQuant, etc.), maintaining high quantization accuracy while improving inference speed. Combining **LLMC** with **lightx2v** enables accelerated inference and memory optimization without compromising accuracy, making it ideal for scenarios that require efficient video model processing. - ---- - -## 1.1 Environment Setup - -To use **lightx2v** for quantized inference, first install and configure the environment: - -```bash -# Clone the repository and its submodules -git clone https://github.com/ModelTC/lightx2v.git lightx2v && cd lightx2v -git submodule update --init --recursive - -# Create and activate the conda environment -conda create -n lightx2v python=3.11 && conda activate lightx2v -pip install -r requirements.txt - -# Reinstall transformers separately to bypass version conflicts -pip install transformers==4.45.2 - -# Install flash-attention 2 -cd lightx2v/3rd/flash-attention && pip install --no-cache-dir -v -e . - -# Install flash-attention 3 (only if using Hopper architecture) -cd lightx2v/3rd/flash-attention/hopper && pip install --no-cache-dir -v -e . -``` - ---- - -## 1.2 Quantization Formats - -**lightx2v** supports several fixed-point quantization formats: - -- **W8A8**: int8 for weights and activations. -- **FP8 (E4M3)**: float8 for weights and activations. -- **Weight per-channel quantization**. -- **Activation per-token dynamic quantization** for improved precision. -- **Symmetric quantization** for both weights and activations (uses only scale). 
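To make the granularity terms in the list above concrete for the int8 case, here is a minimal fake-quantization sketch of symmetric quantization with one scale per weight output channel and one scale per activation token. The names and shapes are made up for illustration; this shows the arithmetic only and is not lightx2v or LLMC code.

```python
import torch

def fake_quant_int8_symmetric(x: torch.Tensor, dim: int) -> torch.Tensor:
    """Symmetric int8 fake quantization with one scale per slice along `dim`."""
    max_abs = x.abs().amax(dim=dim, keepdim=True).clamp(min=1e-8)
    scale = max_abs / 127.0                          # symmetric: the zero-point is always 0
    q = torch.clamp(torch.round(x / scale), -127, 127)
    return q * scale                                 # dequantized ("fake quant") result

weight = torch.randn(512, 512)   # (out_features, in_features)
acts = torch.randn(16, 512)      # (n_tokens, hidden_size)

w_fq = fake_quant_int8_symmetric(weight, dim=1)  # per-channel: one scale per output row
a_fq = fake_quant_int8_symmetric(acts, dim=1)    # per-token: one scale per token, computed at runtime
print((weight - w_fq).abs().max(), (acts - a_fq).abs().max())
```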
- -When using **LLMC** to quantize models, ensure the bit-width of weights and activations matches supported **lightx2v** formats. - ---- - -## 1.3 Quantizing Models with LLMC - -### 1.3.1 Calibration Data - -For example, for the Wan2.1 model on the I2V task, a calibration dataset is provided in the [directory](https://github.com/ModelTC/llmc/tree/main/assets/wan_i2v/calib). Users can add more samples as needed. - -### 1.3.2 Choosing Quantization Algorithm - -#### **W8A8** - -We recommend using **SmoothQuant** for W8A8 settings. -Refer to the SmoothQuant W8A8 [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/video_gen/wan_i2v/smoothquant_w_a.yaml): - -```yaml -quant: - video_gen: - method: SmoothQuant - weight: - bit: 8 - symmetric: True - granularity: per_channel - act: - bit: 8 - symmetric: True - granularity: per_token - special: - alpha: 0.75 -``` - -If SmoothQuant does not meet the precision requirement, use **AWQ** for better accuracy. See the corresponding [configuration](https://github.com/ModelTC/llmc/tree/main/configs/quantization/video_gen/wan_i2v/awq_w_a.yaml). - -#### **FP8-Dynamic** - -LLMC supports FP8 quantization with per-channel weights and per-token dynamic activations. SmoothQuant is again recommended. See the SmoothQuant FP8 [configuration](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/lightx2v/fp8/awq_fp8.yml): - -```yaml -quant: - video_gen: - method: SmoothQuant - weight: - quant_type: float-quant - bit: e4m3 - symmetric: True - granularity: per_channel - use_qtorch: True - act: - quant_type: float-quant - bit: e4m3 - symmetric: True - granularity: per_token - use_qtorch: True - special: - alpha: 0.75 -``` - -Ensure `quant_type` is set to `float-quant` and `use_qtorch` to `True`, as **LLMC** uses [QPyTorch](https://github.com/Tiiiger/QPyTorch) for float quantization. - -Install QPyTorch with: - -```bash -pip install qtorch -``` - -### 1.3.3 Exporting the Quantized Model - -```yaml -save: - save_lightx2v: True - save_path: /path/to/save_for_lightx2v/ -``` - -Set `save_lightx2v` to `True`. LLMC will export weights as `torch.int8` or `torch.float8_e4m3fn` for direct loading in **lightx2v**, along with quantization parameters. - -### 1.3.4 Running LLMC - -Edit the config path in the run script and execute: - -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=sq_for_lightx2v -config=${llmc}/configs/quantization/video_gen/wan_i2v/smoothquant_w_a.yaml -``` - -After LLMC completes, the quantized model is saved to `save.save_path`. - -### 1.3.5 Evaluation - -For the I2V task with the Wan2.1 model, an evaluation dataset is provided [here](https://github.com/ModelTC/llmc/tree/main/assets/wan_i2v/eval). Set the following in the config file: - -```yaml -eval: - eval_pos: [fake_quant] - type: video_gen - name: i2v - download: False - path: ../assets/wan_i2v/eval/ - bs: 1 - target_height: 480 - target_width: 832 - num_frames: 81 - guidance_scale: 5.0 - output_video_path: ./output_videos_sq/ -``` - -LLMC will generate evaluation videos using the pseudo-quantized model. 
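The `alpha: 0.75` value in the configs above controls how much activation outlier magnitude is migrated into the weights. In the SmoothQuant paper this is done with a per-input-channel factor s_j = max|X_j|^alpha / max|W_j|^(1-alpha); activations are divided by s and the corresponding weight columns multiplied by s before quantization. The sketch below illustrates that formula with made-up tensor names; it is a simplified reference, not the LLMC implementation.

```python
import torch

def smoothquant_scales(acts: torch.Tensor, weight: torch.Tensor, alpha: float = 0.75) -> torch.Tensor:
    """Per-input-channel smoothing factors s_j = max|X_j|^alpha / max|W_j|^(1 - alpha).

    acts:   (n_tokens, in_features) calibration activations.
    weight: (out_features, in_features) weight of the linear layer they feed.
    """
    act_max = acts.abs().amax(dim=0).clamp(min=1e-5)   # per input channel
    w_max = weight.abs().amax(dim=0).clamp(min=1e-5)   # per input channel
    return (act_max ** alpha) / (w_max ** (1.0 - alpha))

x = torch.randn(32, 512)
w = torch.randn(256, 512)
s = smoothquant_scales(x, w, alpha=0.75)

# Before quantization the rescaling is mathematically a no-op:
# (x / s) @ (w * s).T == x @ w.T, but x / s has far milder outliers.
out_ref = x @ w.T
out_smoothed = (x / s) @ (w * s).T
print(torch.allclose(out_ref, out_smoothed, rtol=1e-3, atol=1e-3))  # True
```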
- ---- - -## 1.4 Inference with lightx2v - -### 1.4.1 Weight Structure Conversion - -After LLMC exports the model, convert its structure to match **lightx2v** requirements using the [conversion script](https://github.com/ModelTC/lightx2v/blob/main/examples/diffusers/converter.py): - -```bash -python converter.py -s /path/to/save_for_lightx2v/ -o /path/to/output/ -d backward -``` - -The converted model will be saved under `/path/to/output/`. - -### 1.4.2 Offline Inference - -Edit the [inference script](https://github.com/ModelTC/lightx2v/blob/main/scripts/run_wan_i2v_advanced_ptq.sh), set `model_path` to `/path/to/output/` and `lightx2v_path` to your local lightx2v path, then run: - -```bash -bash run_wan_i2v_advanced_ptq.sh -``` diff --git a/docs/en/source/backend/mlcllm.md b/docs/en/source/backend/mlcllm.md deleted file mode 100644 index b4f1d007e..000000000 --- a/docs/en/source/backend/mlcllm.md +++ /dev/null @@ -1,129 +0,0 @@ - -# MLC LLM Quantized Inference - -[MLC LLM](https://github.com/mlc-ai/mlc-llm) is a machine learning compiler and high-performance deployment engine specifically designed for large language models. Its mission is to enable everyone to develop, optimize, and deploy AI models natively on their platforms. - -**MLC LLM** supports directly loading real quantized models exported by **AutoAWQ**. Since **LLMC** is seamlessly integrated with **AutoAWQ**, **AutoAWQ** acts as a bridge between **LLMC** and **MLC LLM**, greatly simplifying the loading and deployment process of quantized models. - -## 1.1 Environment Setup - -To perform quantized inference using **MLC LLM**, you first need to install and configure the **MLC LLM** environment. For example, with CUDA 12.2: - -```bash -python -m pip install --pre -U -f https://mlc.ai/wheels mlc-llm-nightly-cu122 mlc-ai-nightly-cu122 -``` - -## 1.2 Quantization Format - -The quantization format is the same as in [**AutoAWQ**](https://llmc-en.readthedocs.io/en/latest/backend/autoawq.html). - -## 1.3 Using LLMC for Model Quantization - -### 1.3.1 Calibration Data - -In this section, we use **Pileval** and **Wikitext** as calibration datasets. For details on downloading and preprocessing calibration data, please refer to [this section](https://llmc-en.readthedocs.io/en/latest/configs.html). - -For actual use, it is recommended to use data from real deployment scenarios for offline quantization calibration. - -### 1.3.2 Choosing a Quantization Algorithm - -**W4A16** - -For W4A16 quantization settings, we recommend using the AWQ algorithm from LLMC. - -You can refer to the AWQ W4A16 weight quantization [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/mlcllm/awq_w4a16.yml): - -```yaml -# configs/quantization/backend/mlcllm/awq_w4a16.yml -quant: - method: Awq - weight: - bit: 4 - symmetric: True - granularity: per_group - group_size: 128 - pack_version: gemm_pack - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -``` - -Please note that the `pack_version` parameter needs to be set to `gemm_pack`, which means int4 data is packed into `torch.int32`. **MLC LLM** supports loading integer weights corresponding to **AutoAWQ**'s `GEMM` kernel format. - -Additionally, if AWQ does not meet the accuracy requirements, other algorithms can be explored, such as [GPTQ](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/mlcllm/gptq_w4a16.yml). 
We also recommend the **AWQ+OmniQuant combined algorithm** introduced in [this section](https://llmc-en.readthedocs.io/en/latest/practice/awq_omni.html) to further improve accuracy. The corresponding [configuration files](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/mlcllm/w4a16_combin) are available for reference. - -### 1.3.3 Exporting Real Quantized Models - -```yaml -save: - save_mlcllm: True - save_path: /path/to/save_for_mlcllm_awq_w4/ -``` - -Make sure to set `save_mlcllm` to `True`. For **W4A16** quantization settings, LLMC will export the weights in `torch.int32` format, making it easy for **MLC LLM** to load, and will also export the quantization parameters. - -### 1.3.4 Running LLMC - -Modify the configuration file path in the script and run: - -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=awq_for_mlcllm -config=${llmc}/configs/quantization/backend/mlcllm/awq_w4a16.yml -``` - -After LLMC finishes running, the real quantized model will be stored in the `save.save_path` directory. - -## 1.4 Using MLC LLM for Inference - -### 1.4.1 Generate MLC Configuration - -The first step is to generate the **MLC LLM** configuration file. - -```bash -export LOCAL_MODEL_PATH=/path/to/llama2-7b-chat/ # Local model storage path -export MLC_MODEL_PATH=./dist/llama2-7b-chat-MLC/ # Path for storing the processed MLC model -export QUANTIZATION=q4f16_autoawq # Quantization option, LLMC currently only supports the q4f16_autoawq format -export CONV_TEMPLATE=llama-2 # Conversation template option - -mlc_llm gen_config $LOCAL_MODEL_PATH --quantization $QUANTIZATION --conv-template $CONV_TEMPLATE -o $MLC_MODEL_PATH -``` - -The configuration generation command takes in the local model path, the target path for **MLC LLM** output, the conversation template name in **MLC LLM**, and the quantization format. Here, the quantization option `q4f16_autoawq` represents using **AutoAWQ**'s `w4a16` quantization format, and the conversation template `llama-2` is the template for the **Llama-2** model in **MLC LLM**. - -### 1.4.2 Compile Model Library - -Here is an example command to compile the model library in **MLC LLM**: - -```bash -export MODEL_LIB=$MLC_MODEL_PATH/lib.so -mlc_llm compile $MLC_MODEL_PATH -o $MODEL_LIB -``` - -### 1.4.3 Convert Model Weights - -In this step, we convert the model weights to **MLC LLM** format. - -```bash -export LLMC_MODEL_PATH=/path/to/save_for_mlcllm_awq_w4/ # LLMC-exported real quantized model -mlc_llm convert_weight $LOCAL_MODEL_PATH --quantization $QUANTIZATION -o $MLC_MODEL_PATH --source-format awq --source $LLMC_MODEL_PATH/mlcllm_quant_model/model.safetensors -``` - -In the above model conversion process, replace `$LLMC_MODEL_PATH` with `save.save_path`. The `--source-format` parameter indicates that **LLMC** is passing **AutoAWQ** format weights to **MLC LLM**, and `--source` points to the real quantized tensor exported by **LLMC**, which is stored in `save.save_path`. The converted result will be stored in the output path specified by **MLC LLM** using the `-o` option, and can be used for **MLC LLM** inference. - -### 1.4.4 Running the MLC LLM Engine - -We provide an example of running the **MLC LLM** engine for inference [here](https://github.com/ModelTC/llmc/blob/main/examples/backend/mlcllm/infer_with_mlcllm.py). 
- -Replace the `model_path` in the [example](https://github.com/ModelTC/llmc/blob/main/examples/backend/mlcllm/infer_with_mlcllm.py) with the output path of **MLC LLM**, then run the following command to complete the inference: - -```bash -cd examples/backend/mlcllm - -python infer_with_mlcllm.py -``` diff --git a/docs/en/source/backend/sglang.md b/docs/en/source/backend/sglang.md deleted file mode 100644 index adac59f59..000000000 --- a/docs/en/source/backend/sglang.md +++ /dev/null @@ -1,219 +0,0 @@ - -# SGLang Quantized Inference - -[SGLang](https://github.com/sgl-project/sglang) is a fast-serving framework for large language models and vision-language models. By co-designing the backend runtime and frontend language, it makes interactions with models faster and more controllable. - -## 1.1 Environment Setup - -To use SGLang for quantized inference, you first need to install and configure the SGLang environment: -```bash -pip install --upgrade pip -pip install "sglang[all]" - -# Install FlashInfer CUDA kernels -pip install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.4/ -``` - -## 1.2 Quantization Format - -Same as [**VLLM**](https://llmc-en.readthedocs.io/en/latest/backend/vllm.html). - -## 1.3 Using LLMC for Model Quantization - -### 1.3.1 Calibration Data - -In this section, we use the **Plieval** and **Wikitext** academic datasets as calibration data. For downloading and preprocessing calibration data, please refer to [this section](https://llmc-en.readthedocs.io/en/latest/configs.html). - -For real use cases, it is recommended to use real deployment scenario data for offline quantization calibration. - -### 1.3.2 Choosing a Quantization Algorithm - -**W8A16** - -Under the W8A16 quantization setting, the accuracy of large language models generally does not show significant issues. In this case, we recommend using the simplest RTN (Round to Nearest) algorithm, which does not require additional calibration steps and runs quickly. - -The specific implementation can be found in the RTN W8A16 weight quantization [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/sglang/rtn_w8a16.yml). - -```yaml -# configs/quantization/backend/sglang/rtn_w8a16.yml -quant: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_group - group_size: 128 - need_pack: True -``` -Please note that in this step, the `need_pack` parameter must be set to `True`, which will "pack" the 8-bit weights into the `torch.int32` format for SGLang to directly load for inference. - -**W4A16** - -Under the W4A16 quantization setting, RTN (Round to Nearest) cannot ensure accuracy, so higher-order quantization algorithms are required to maintain model accuracy. In this case, we recommend using the **AWQ** algorithm from **LLMC**. - -The specific implementation can be found in the AWQ W4A16 weight quantization [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/sglang/awq_w4a16.yml). - -```yaml -# configs/quantization/backend/sglang/awq_w4a16.yml -quant: - method: Awq - weight: - bit: 4 - symmetric: True - granularity: per_group - group_size: 128 - need_pack: True - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -``` -Please note that in this step, the `need_pack` parameter must be set to `True`, which will "pack" the 4-bit weights into the `torch.int32` format for **SGlang** to directly load for inference. 
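For readers unfamiliar with the `granularity: per_group` / `group_size: 128` settings used in these W4A16 configs, the sketch below shows group-wise symmetric 4-bit fake quantization, with one scale per 128 consecutive input channels. It is illustrative only; the names are made up, and it is separate from the int32 packing performed by `need_pack`.

```python
import torch

def fake_quant_w4_per_group(weight: torch.Tensor, group_size: int = 128) -> torch.Tensor:
    """Symmetric 4-bit fake quantization with one scale per `group_size` input columns."""
    out_features, in_features = weight.shape
    assert in_features % group_size == 0
    w = weight.reshape(out_features, in_features // group_size, group_size)
    # One scale per (output row, group); the symmetric int4 range is roughly [-7, 7].
    scale = w.abs().amax(dim=-1, keepdim=True).clamp(min=1e-8) / 7.0
    q = torch.clamp(torch.round(w / scale), -8, 7)
    return (q * scale).reshape(out_features, in_features)

w = torch.randn(256, 1024)
w_fq = fake_quant_w4_per_group(w, group_size=128)
print((w - w_fq).abs().mean())  # average error introduced by 4-bit per-group rounding
```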
- -Additionally, if AWQ does not meet accuracy requirements, we recommend using the **AWQ + OmniQuant** combined algorithm as introduced in [this section](https://llmc-en.readthedocs.io/en/latest/practice/awq_omni.html) to further improve accuracy. The corresponding [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/sglang/w4a16_combin) is also provided. - -**W8A8** - -Under the W8A8 quantization setting, we also recommend using the AWQ algorithm. AWQ generally outperforms SmoothQuant and OS+ in most cases, providing better quantization accuracy. - -The specific implementation can be found in the AWQ W8A8 [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/sglang/awq_w8a8.yml). - -```yaml -# configs/quantization/backend/sglang/awq_w8a8.yml -quant: - method: Awq - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -``` - -Additionally, if AWQ does not meet accuracy requirements, we recommend using the **Quarot + GPTQ** combined algorithm as introduced in [this section](https://llmc-en.readthedocs.io/en/latest/practice/quarot_gptq.html) to further improve accuracy. The corresponding [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/sglang/w8a8_combin) is also provided. - - -**FP8-Dynamic** - -In FP8 quantization, **LLMC** supports weight quantization per-channel and activation quantization dynamically per-token. In this case, the RTN (Round to Nearest) algorithm is sufficient. However, we recommend using the AWQ algorithm for better quantization accuracy. For implementation details, refer to the AWQ FP8 [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/sglang/fp8/awq_fp8.yml). - -```yaml -# configs/quantization/backend/sglang/fp8/awq_fp8.yml -quant: - method: Awq - quant_type: float_quant - weight: - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_channel - use_qtorch: True - act: - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_token - use_qtorch: True - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -``` - -Ensure that `quant_type` is set to `float_quant` to indicate floating-point quantization. Additionally, set `use_qtorch` to `True`, as **LLMC**'s FP8 implementation depends on certain functionalities from the [QPyTorch](https://github.com/Tiiiger/QPyTorch) library. - -Install [QPyTorch](https://github.com/Tiiiger/QPyTorch) with the following command: - -```bash -pip install qtorch -``` - -**FP8-Static** - -In FP8 quantization, **LLMC** also supports weight quantization per-tensor and activation quantization statically per-tensor. In this case, we recommend using the AWQ algorithm while adjusting the activation ranges. Refer to the AWQ FP8 static quantization [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/sglang/fp8/awq_fp8_static.yml). 
- -```yaml -# configs/quantization/backend/sglang/fp8/awq_fp8_static.yml -quant: - method: Awq - quant_type: float-quant - weight: - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_tensor - use_qtorch: True - act: - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_tensor - use_qtorch: True - static: True -``` - -### 1.3.3 Exporting Real Quantized Model - -```yaml -save: - save_sgl: True - save_path: /path/to/save_for_sglang_rtn_w8a16/ -``` -Please note that you must set `save_sgl` to `True`. For **W4A16** and **W8A16** quantization settings, LLMC will "pack" the weights into `torch.int32` format for direct loading by SGlang, while also exporting the quantization parameters. - -For the **W8A8** quantization setting, LLMC will quantize the weights into `torch.int8` format for direct loading by SGlang, and export the relevant quantization parameters as well. - -### 1.3.4 Running LLMC - -Modify the configuration file path in the script and run: - -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=rtn_for_sglang -config=${llmc}/configs/quantization/backend/sglang/rtn_w8a16.yml -``` -Once LLMC finishes running, the quantized model will be stored at the `save.save_path` location. - -## 1.4 Using Sglang for Inference - -### 1.4.1 Inference Service - -By default, it will start the server at http://localhost:10000. Replace `model_path` with the `quantized model` saved under the `save.save_path`. - -Start the service: - -```bash -python -m sglang.launch_server --model-path model_path -``` - -Call the service: - -```bash -curl http://localhost:10000/generate -H "Content-Type: application/json" -d '{ - "text": "Once upon a time,", - "sampling_params": { - "max_new_tokens": 16, - "temperature": 0 - } - }' -``` - -Additionally, we have built an [example](https://github.com/ModelTC/llmc/blob/main/examples/backend/sglang/infer_with_sglang.py) that uses **SGLang** for inference. - -```bash -cd examples/backend/sglang - -python infer_with_sglang.py \ No newline at end of file diff --git a/docs/en/source/backend/vllm.md b/docs/en/source/backend/vllm.md deleted file mode 100644 index 78ca00a92..000000000 --- a/docs/en/source/backend/vllm.md +++ /dev/null @@ -1,235 +0,0 @@ - -# VLLM Quantized Inference - -[VLLM](https://github.com/vllm-project/vllm) is an efficient backend specifically designed to meet the inference needs of large language models. By optimizing memory management and computational efficiency, it significantly speeds up the inference process. - -**LLMC** supports exporting quantized model formats required by **VLLM** and, through its strong multi-algorithm support (such as AWQ, GPTQ, QuaRot, etc.), can maintain high quantization accuracy while ensuring inference speed. The combination of **LLMC** and **VLLM** enables users to achieve inference acceleration and memory optimization without sacrificing accuracy, making it ideal for scenarios requiring efficient handling of large-scale language models. - -## 1.1 Environment Setup - -To use **VLLM** for quantized inference, first, install and configure the **VLLM** environment: - -```bash -pip install vllm -``` - -## 1.2 Quantization Formats - -In **VLLM**'s fixed-point integer quantization, the following common formats are supported: - -- **W4A16**: Weights are int4, activations are float16. -- **W8A16**: Weights are int8, activations are float16. -- **W8A8**: Weights are int8, activations are int8. 
-- **FP8 (E4M3, E5M2)**: Weights are float8, activations are float8. -- **Per-channel/group weight quantization**: Quantization applied per channel or group. -- **Per-tensor weight quantization**: Quantization applied per tensor. -- **Per-token dynamic activation quantization**: Dynamic quantization for each token to further improve precision. -- **Per-tensor static activation quantization**: Static quantization for each tensor to enhance efficiency. -- **Symmetric weight/activation quantization**: Quantization parameters include scale. - -Therefore, when quantizing models with **LLMC**, make sure that the bit settings for weights and activations are in formats supported by **VLLM**. - -## 1.3 Using LLMC for Model Quantization - -### 1.3.1 Calibration Data - -In this chapter, we use the **Pileval** and **Wikitext** academic datasets as calibration data. For downloading and preprocessing calibration data, refer to [this chapter](https://llmc-en.readthedocs.io/en/latest/configs.html). - -In practical use, we recommend using real deployment data for offline quantization calibration. - -### 1.3.2 Choosing a Quantization Algorithm - -**W8A16** - -In the W8A16 quantization setting, large language models typically do not experience significant accuracy degradation. In this case, we recommend using the simple RTN (Round to Nearest) algorithm, which does not require additional calibration steps and runs quickly. - -You can refer to the RTN W8A16 weight quantization [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/vllm/rtn_w8a16.yml). - -```yaml -# configs/quantization/backend/vllm/rtn_w8a16.yml -quant: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_group - group_size: 128 - need_pack: True -``` - -Make sure to set the `need_pack` parameter to `True`, which packs 8-bit weights into `torch.int32` format for direct **VLLM** loading and inference. - -**W4A16** - -In the W4A16 quantization setting, RTN (Round to Nearest) cannot ensure accuracy, so higher-order quantization algorithms are needed to maintain model accuracy. In this case, we recommend using the AWQ algorithm from **LLMC**. - -You can refer to the AWQ W4A16 weight quantization [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/vllm/awq_w4a16.yml). - -```yaml -# configs/quantization/backend/vllm/awq_w4a16.yml -quant: - method: Awq - weight: - bit: 4 - symmetric: True - granularity: per_group - group_size: 128 - need_pack: True - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -``` - -Make sure to set the `need_pack` parameter to `True`, which packs 4-bit weights into `torch.int32` format for direct **VLLM** loading and inference. - -If AWQ cannot meet accuracy requirements, we recommend using the **AWQ + OmniQuant combination algorithm** described in [this chapter](https://llmc-en.readthedocs.io/en/latest/practice/awq_omni.html) to further improve accuracy. The corresponding [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/vllm/w4a16_combin) is also provided. - -**W8A8** - -In the W8A8 quantization setting, we also recommend using the AWQ algorithm. AWQ generally outperforms SmoothQuant and OS+ in most cases, providing better quantization accuracy. - -You can refer to the AWQ W8A8 quantization [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/vllm/awq_w8a8.yml). 
- -```yaml -# configs/quantization/backend/vllm/awq_w8a8.yml -quant: - method: Awq - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -``` - -If AWQ cannot meet accuracy requirements, we recommend using the **Quarot + GPTQ combination algorithm** described in [this chapter](https://llmc-en.readthedocs.io/en/latest/practice/quarot_gptq.html) to further improve accuracy. The corresponding [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/vllm/w8a8_combin) is also provided. - -**FP8-Dynamic** - -In FP8 quantization, **LLMC** supports weight quantization per-channel and activation quantization dynamically per-token. In this case, the RTN (Round to Nearest) algorithm is sufficient. However, we recommend using the AWQ algorithm for better quantization accuracy. For implementation details, refer to the AWQ FP8 [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/vllm/fp8/awq_fp8.yml). - -```yaml -# configs/quantization/backend/vllm/fp8/awq_fp8.yml -quant: - method: Awq - quant_type: float_quant - weight: - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_channel - use_qtorch: True - act: - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_token - use_qtorch: True - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -``` - -Ensure that `quant_type` is set to `float_quant` to indicate floating-point quantization. Additionally, set `use_qtorch` to `True`, as **LLMC**'s FP8 implementation depends on certain functionalities from the [QPyTorch](https://github.com/Tiiiger/QPyTorch) library. - -Install [QPyTorch](https://github.com/Tiiiger/QPyTorch) with the following command: - -```bash -pip install qtorch -``` - -**FP8-Static** - -In FP8 quantization, **LLMC** also supports weight quantization per-tensor and activation quantization statically per-tensor. In this case, we recommend using the AWQ algorithm while adjusting the activation ranges. Refer to the AWQ FP8 static quantization [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/vllm/fp8/awq_fp8_static.yml). - -```yaml -# configs/quantization/backend/vllm/fp8/awq_fp8_static.yml -quant: - method: Awq - quant_type: float-quant - weight: - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_tensor - use_qtorch: True - act: - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_tensor - use_qtorch: True - static: True -``` - -### 1.3.3 Exporting Real Quantized Model - -```yaml -save: - save_vllm: True - save_path: /path/to/save_for_vllm_rtn_w8a16/ -``` - -Make sure to set `save_vllm` to `True`. For **W4A16** and **W8A16** quantization settings, **LLMC** will export the weights in `torch.int32` format for direct **VLLM** loading, and it will also export the quantization parameters. - -For **W8A8** quantization settings, **LLMC** will export the weights in `torch.int8` format for direct **VLLM** loading, along with the relevant quantization parameters. 
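As a rough picture of what the W8A8 export contains, the toy function below performs symmetric per-channel int8 quantization of a weight matrix and returns the int8 tensor together with one scale per output channel. It is a sketch for intuition only, not LLMC's exporter, and the function name is made up.

```python
import torch

def quantize_weight_per_channel_int8(w: torch.Tensor):
    """Toy symmetric per-channel (per output row) int8 quantization."""
    max_abs = w.abs().amax(dim=1, keepdim=True)          # [out_features, 1]
    scale = (max_abs / 127.0).clamp(min=1e-8)            # one fp scale per channel
    q = torch.clamp((w / scale).round(), -128, 127).to(torch.int8)
    return q, scale.squeeze(1)

w = torch.randn(8, 16)                                    # stand-in weight matrix
q, scale = quantize_weight_per_channel_int8(w)
w_hat = q.float() * scale.unsqueeze(1)                    # dequantize to inspect the error
print(q.dtype, scale.shape, (w - w_hat).abs().max().item())
```

A real checkpoint for an inference backend stores the int8 (or packed int32) weights plus such scales, so the engine can run integer kernels or dequantize on the fly.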
- -### 1.3.4 Running LLMC - -Modify the configuration file path in the run script and execute: - -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=rtn_for_vllm -config=${llmc}/configs/quantization/backend/vllm/rtn_w8a16.yml -``` - -After **LLMC** finishes running, the real quantized model will be stored at the `save.save_path`. - -## 1.4 Using VLLM for Inference - -### 1.4.1 Offline Inference - -We have provided an [example](https://github.com/ModelTC/llmc/blob/main/examples/backend/vllm/infer_with_vllm.py) for performing offline batch inference on a dataset using **VLLM**. You only need to replace the `model_path` in the [example](https://github.com/ModelTC/llmc/blob/main/examples/backend/vllm/infer_with_vllm.py) with the `save.save_path` path, and then run the following command: - -```bash -cd examples/backend/vllm - -python infer_with_vllm.py -``` - -### 1.4.2 Inference Service - -vLLM can be deployed as a server that implements the OpenAI API protocol. This allows vLLM to be used as a drop-in replacement for applications using the OpenAI API. By default, it starts the server at http://localhost:8000. You can specify the address with `--host` and `--port` arguments. Replace `model_path` with the saved `quantized model`. - -Start the server: - -``` -vllm serve model_path -``` - -Query the server: - -``` -curl http://localhost:8000/v1/completions -H "Content-Type: application/json" -d '{ - "model": "model_path", - "prompt": "What is the AI?", - "max_tokens": 128, - "temperature": 0 -}' -``` diff --git a/docs/en/source/conf.py b/docs/en/source/conf.py deleted file mode 100644 index 7e78c0fb8..000000000 --- a/docs/en/source/conf.py +++ /dev/null @@ -1,114 +0,0 @@ -# Configuration file for the Sphinx documentation builder. -# -# This file adopts the theme and basic settings used by the Lightx2v docs -# but keeps the llmc-specific information from the original configuration. -# ----------------------------------------------------------------------------- - -import os -import sys -from typing import List - -# -- Path setup -------------------------------------------------------------- -# Add project root (two levels up) so autodoc can find the modules. 
-ROOT_DIR = os.path.abspath(os.path.join(__file__, "../../..")) -sys.path.append(ROOT_DIR) - -# -- Project information ----------------------------------------------------- -project = "llmc" -copyright = "2024, llmc contributors" -author = "ModelTC" -release = "1.0.0" - -# GitHub repository ---------------------------------------------------------- -github_url = "https://github.com/ModelTC/llmc" - -html_context = { - "display_github": True, - "github_user": author, - "github_repo": "llmc", - "github_version": "main", - "conf_py_path": "/docs/en/source/", # Path in the checkout to the docs root -} - -# -- General configuration --------------------------------------------------- - -extensions = [ - "sphinx.ext.napoleon", - "sphinx.ext.viewcode", - "sphinx.ext.intersphinx", - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "myst_parser", - "sphinx_copybutton", - "sphinx.ext.doctest", - "sphinx.ext.mathjax", - "sphinx.ext.ifconfig", - "sphinx.ext.githubpages", - "sphinx.ext.autosectionlabel", - "sphinxcontrib.katex", - "sphinxcontrib.contentui", -] - -templates_path: List[str] = ["_templates"] -exclude_patterns: List[str] = [] - -language = "en" - -# Exclude the prompt "$" when copying code blocks -------------------------- -copybutton_prompt_text = r"\$ " -copybutton_prompt_is_regexp = True - -# -- Options for HTML output ------------------------------------------------- -html_title = project -html_theme = "sphinx_book_theme" -html_logo = "images/logo/llmc.svg" -html_static_path = ["_static"] - -# Theme options compatible with sphinx_book_theme / pydata-sphinx-theme -html_theme_options = { - "path_to_docs": "docs/en/source", - "repository_url": github_url, - "use_repository_button": True, - "logo": { - "text": "LLMC", - "image_light": "images/logo/llmc.svg", - "image_dark": "images/logo/llmc.svg", - }, - "doc_items": { - "paper": "https://arxiv.org/abs/2405.06001", - "institution": "https://github.com/ModelTC", - }, -} - -# -- Intersphinx mapping (optional) ----------------------------------------- -intersphinx_mapping = { - "python": ("https://docs.python.org/3", {}), - "sphinx": ("https://www.sphinx-doc.org/en/master", {}), -} - -# -- Mock heavy external dependencies --------------------------------------- -autodoc_mock_imports = [ - "torch", - "transformers", - "sentencepiece", - "tensorizer", -] - -# Remove base-class note in generated docs ---------------------------------- -from sphinx.ext import autodoc # noqa: E402, isort: skip - -class MockedClassDocumenter(autodoc.ClassDocumenter): - """Remove note about base class when a class is derived from object.""" - - def add_line(self, line: str, source: str, *lineno: int) -> None: - if line == " Bases: :py:class:`object`": - return - super().add_line(line, source, *lineno) - -autodoc.ClassDocumenter = MockedClassDocumenter - -# -- Customisation hooks ----------------------------------------------------- - -def setup(app): - """Optional Sphinx setup hooks.""" - pass diff --git a/docs/en/source/configs.md b/docs/en/source/configs.md deleted file mode 100644 index b7446df92..000000000 --- a/docs/en/source/configs.md +++ /dev/null @@ -1,426 +0,0 @@ -# Configs' brief description - -All configurations can be found [here](https://github.com/ModelTC/llmc/tree/main/configs) - -Here's a brief config example - -``` -base: - seed: &seed 42 # Set random seed -model: - type: model_type # Type of the model - path: model path # Path to the model - tokenizer_mode: fast # Type of the model's tokenizer - torch_dtype: auto # Data type of the model -calib: 
- name: pileval # Name of the calibration dataset - download: False # Whether to download the calibration dataset online - path: calib data path # Path to the calibration dataset - n_samples: 512 # Number of samples in the calibration dataset - bs: 1 # Batch size for the calibration dataset - seq_len: 512 # Sequence length for the calibration dataset - preproc: pileval_smooth # Preprocessing method for the calibration dataset - seed: *seed # Random seed for the calibration dataset -eval: - eval_pos: [pretrain, transformed, fake_quant] # Evaluation points - name: wikitext2 # Name of the evaluation dataset - download: False # Whether to download the evaluation dataset online - path: eval data path # Path to the evaluation dataset - bs: 1 # Batch size for the evaluation dataset - seq_len: 2048 # Sequence length for the evaluation dataset - eval_token_consist: False # Whether to evaluate the consistency of tokens between the quantized and original models -quant: - method: SmoothQuant # Compression method - weight: - bit: 8 # Number of quantization bits for weights - symmetric: True # Whether weight quantization is symmetric - granularity: per_channel # Granularity of weight quantization - act: - bit: 8 # Number of quantization bits for activations - symmetric: True # Whether activation quantization is symmetric - granularity: per_token # Granularity of activation quantization - special: # Special parameters required for the quantization algorithm. Refer to the comments in the configuration file and the original paper for usage. -save: - save_vllm: False # Whether to save the real quantized model for VLLM inference - save_sgl: False # Whether to save the real quantized model for Sglang inference - save_autoawq: False # Whether to save the real quantized model for AutoAWQ inference - save_mlcllm: False # Whether to save the real quantized model for MLC-LLM inference - save_trans: False # Whether to save the model after weight transformation - save_fake: False # Whether to save the fake quantized weights - save_path: /path/to/save # Save path -``` - -# Configs' detailed description - -## base - - base.seed - -Sets the random seed, which is used to set all random seeds for the entire framework - -## model - - model.type - -The type of the model. Llama, Qwen2, Llava, Gemma2, and many other models are supported; you can check all the models supported by llmc [here](https://github.com/ModelTC/llmc/blob/main/llmc/models/__init__.py). - - model.path - -Currently, LLMC only supports models in Hugging Face format, and you can use the following code to check whether the model can be loaded normally. - -``` -from transformers import AutoModelForCausalLM, AutoConfig - - -model_path = "/path/to/model" # model path -model_config = AutoConfig.from_pretrained( - model_path, trust_remote_code=True -) -model = AutoModelForCausalLM.from_pretrained( - model_path, - config=model_config, - trust_remote_code=True, - torch_dtype="auto", - low_cpu_mem_usage=True, -) - -print(model) -``` -If the above code fails to load your model, the likely reasons are: - -1. Your model is not in Hugging Face format - -2. Your version of transformers is too low; execute `pip install transformers --upgrade` to upgrade it. - -Before llmc runs, make sure that the above code can load your model successfully, otherwise llmc will not be able to load your model. - - model.tokenizer_mode - -Choose whether to use a slow or fast tokenizer - - model.torch_dtype - -You can set the data types of model weights: - -1. auto - -2. torch.float16 - -3. torch.bfloat16 - -4.
torch.float32 - -where auto will follow the original data type setting of the weight file - -## calib - - calib.name - -The name of the calibration dataset. Currently supported by the following types of calibration datasets: - -1. pileval - -2. wikitext2 - -3. c4 - -4. ptb - -5. custom - -where custom indicates the use of user-defined calibration datasets, refer to the [Custom Calibration Dataset section](https://llmc-en.readthedocs.io/en/latest/advanced/custom_dataset.html) of the advanced usage document for specific instructions - - calib.download - -Indicates whether the calibration dataset needs to be downloaded online at runtime - -If you set True, you do not need to set calib.path, llmc will automatically download the dataset online - -If you set False, you need to set calib.path, and llmc will read the dataset from this address, and you don't need to run llmc on the Internet - - calib.path - -If calib.download is set to False, you need to set calib.path, which indicates the path where the calibration dataset is stored - -The data stored in this path must be a dataset in arrow format - -To download the dataset in Arrow format from Hugging Face, you can use the following code -``` -from datasets import load_dataset -calib_dataset = load_dataset(...) -calib_dataset.save_to_disk(...) -``` -Load datasets in that format can be used -``` -from datasets import load_from_disk -data = load_from_disk(...) -``` -The LLMC has provided a download script for the above dataset - -The calibration dataset can be downloaded [here](https://github.com/ModelTC/llmc/blob/main/tools/download_calib_dataset.py). - -The execution command is `python download_calib_dataset.py --save_path [calib dataset save path]` - -The test dataset can be downloaded [here](https://github.com/ModelTC/llmc/blob/main/tools/download_eval_dataset.py). - - The execution command is `python download_eval_dataset.py --save_path [eval dataset save path]` - -If you want to use more datasets, you can refer to the download method of the arrow format dataset above and modify it yourself - - calib.n_samples - -Select n_samples pieces of data for calibration - - calib.bs - -Set the calibration data to calib.bs as the batch size, if it is -1, all the data is packaged into a batch of data - - calib.seq_len - -The sequence length of the calibration data - - calib.preproc - -The preprocessing methods of calibration data are currently implemented by llmc in a variety of preprocessing methods - -1. wikitext2_gptq - -2. ptb_gptq - -3. c4_gptq - -4. pileval_awq - -5. pileval_smooth - -6. pileval_omni - -7. general - -8. random_truncate_txt - -With the exception of general, the rest of the preprocessing can be found [here](https://github.com/ModelTC/llmc/blob/main/llmc/data/dataset/specified_preproc.py) - -general is implemented in the general_preproc function in the [base_dataset](https://github.com/ModelTC/llmc/blob/main/llmc/data/dataset/base_dataset.py) - - calib.seed - -The random seed in the data preprocessing follows the base.seed setting by default - - -## eval - - eval.eval_pos - -Indicates the eval positions, and currently supports three positions that can be evaluated - -1. pretrain - -2. transformed - -3. fake_quant - -eval_pos need to give a list, the list can be empty, and an empty list means that no tests are being performed - - eval.name - -The name of the eval dataset is supported by the following types of test datasets: - -1. wikitext2 - -2. c4 - -3. 
ptb - -For details about how to download the test dataset, see calib.name calibration dataset - - eval.download - -Indicates whether the eval dataset needs to be downloaded online at runtime, see calib.download - - eval.path - -Refer to calib.path - - eval.bs - -Eval batch size - - eval.seq_len - -The sequence length of the eval data - - eval.inference_per_block - -If your model is too large and the gpu memory of a single card cannot cover the entire model during the eval, then you need to open the inference_per_block for inference, and at the same time, on the premise of not exploding the gpu memory, appropriately increase the bs to improve the inference speed. - -Here's a config example -``` -bs: 10 -inference_per_block: True -``` - - Eval multiple datasets at the same time - -LLMC also supports the simultaneous evaluation of multiple datasets - -Below is an example of evaluating a single wikitext2 dataset - -``` -eval: - name: wikitext2 - path: wikitext2 path -``` - -Here's an example of evaluating multiple datasets - -``` -eval: - name: [wikitext2, c4, ptb] - path: The common upper directory of these data sets -``` - -It should be noted that the names of multiple dataset evaluations need to be represented in the form of a list, and the following directory rules need to be followed - - -- upper-level directory - - wikitext2 - - c4 - - ptb - -If you use the LLMC [download script](https://github.com/ModelTC/llmc/blob/main/tools/download_eval_dataset.py) directly, the shared upper-level directory is the `--save_path` specified dataset storage path - - - - -## quant - - quant.method - -The names of the quantization algorithms used, and all the quantization algorithms supported by the LLMC, can be viewed [here](https://github.com/ModelTC/llmc/blob/main/llmc/compression/quantization/__init__.py). - - - quant.weight - -Quantization settings for weights - - quant.weight.bit - -The quantized number of bits of the weight - - quant.weight.symmetric - -Quantitative symmetry of weights - - quant.weight.granularity - -The quantification granularity of the weights supports the following granularities - -1. per tensor - -2. per channel - -3. per group - - quant.act - -Activated quantization settings - - quant.act.bit - -Activated quantized bit digits - - quant.act.symmetric - -Quantified symmetry or not - - quant.act.granularity - -The quantization granularity of the activation supports the following granularities - -1. per tensor - -2. per token - -3. per head - -If quant.method is set to RTN, activating quantization can support static per tensor settings, and the following is a W8A8 configuration that activates static per tensor quantization - -``` -quant: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_channel - act: - bit: 8 - symmetric: True - granularity: per_tensor - static: True -``` - -## sparse - - sparse.method - -The name of the sparsification algorithm used. This includes both [model sparsification](https://github.com/ModelTC/LightCompress/blob/main/llmc/compression/sparsification/__init__.pyn) and [reduction](https://github.com/ModelTC/LightCompress/blob/main/llmc/compression/token_reduction/__init__.py) of visual tokens. All supported algorithms can be found in the corresponding files. - -It’s worth noting that for model sparsification, you need to specify the exact algorithm name, whereas for token reduction, you only need to set it to `TokenReduction` first, and then specify the exact algorithm under `special`. 
- -```yaml -sparse: - method: Wanda -``` - -```yaml -sparse: - method: TokenReduction - special: - method: FastV -``` - -## save - - save.save_vllm - -Whether to save as a [VLLM](https://github.com/vllm-project/vllm) inference backend-supported real quantized model. - -When this option is enabled, the saved model weights will significantly shrink (real quantization), and it can be directly loaded for inference using the VLLM backend. This improves inference speed and reduces memory usage. For more details on the [VLLM](https://github.com/vllm-project/vllm) inference backend, refer to [this section](https://llmc-en.readthedocs.io/en/latest/backend/vllm.html#). - - save.save_sgl - -Whether to save as a [Sglang](https://github.com/sgl-project/sglang) inference backend-supported real quantized model. - -When this option is enabled, the saved model weights will significantly shrink (real quantization), and it can be directly loaded for inference using the [Sglang](https://github.com/sgl-project/sglang) backend. This improves inference speed and reduces memory usage. For more details on the [Sglang](https://github.com/sgl-project/sglang) inference backend, refer to [this section](https://llmc-en.readthedocs.io/en/latest/backend/sglang.html). - - save.save_autoawq - -Whether to save as an [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) inference backend-supported real quantized model. - -When this option is enabled, the saved model weights will significantly shrink (real quantization), and it can be directly loaded for inference using the [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) backend. This improves inference speed and reduces memory usage. For more details on the [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) inference backend, refer to [this section](https://llmc-en.readthedocs.io/en/latest/backend/autoawq.html). - - save.save_mlcllm - -Whether to save as an [MLC-LLM](https://github.com/mlc-ai/mlc-llm) inference backend-supported real quantized model. - -When this option is enabled, the saved model weights will significantly shrink (real quantization), and it can be directly loaded for inference using the [MLC-LLM](https://github.com/mlc-ai/mlc-llm) backend. This improves inference speed and reduces memory usage. For more details on the [MLC-LLM](https://github.com/mlc-ai/mlc-llm) inference backend, refer to [this section](https://llmc-en.readthedocs.io/en/latest/backend/mlcllm.html). - - save.save_trans - -Whether to save the adjusted model weights. - -The saved weights are adjusted to be more suitable for quantization, possibly containing fewer outliers. They are still saved in fp16/bf16 format (with the same file size as the original model). When deploying the model in the inference engine, the engine's built-in `naive quantization` needs to be used to achieve quantized inference. - -Unlike `save_vllm` and similar options, this option requires the inference engine to perform real quantization, while `llmc` provides a floating-point model weight that is more suitable for quantization. - -For example, the `save_trans` models exported by algorithms such as `SmoothQuant, Os+, AWQ, and Quarot` have `fewer outliers` and are more suitable for quantization. - - - save.save_fake - -Whether to save the fake quantized model. - - save.save_path - -The path where the model is saved. This path must be a new, non-existent directory, otherwise, LLMC will terminate the run and issue an appropriate error message. 
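Since the run aborts when `save_path` already exists, a quick pre-flight check before launching a long calibration job can save time. A minimal sketch, with a placeholder path:

```python
import os

save_path = "/path/to/save"  # the value you plan to put under save.save_path

# LLMC requires a new, non-existent directory here, so fail fast before
# spending time on calibration if the path is already taken.
if os.path.exists(save_path):
    raise RuntimeError(f"{save_path} already exists; choose a fresh directory.")
print("save_path is free:", save_path)
```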
\ No newline at end of file diff --git a/docs/en/source/images/logo/llmc.svg b/docs/en/source/images/logo/llmc.svg deleted file mode 100644 index f1cb2bbf7..000000000 --- a/docs/en/source/images/logo/llmc.svg +++ /dev/null @@ -1,100 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/en/source/index.rst b/docs/en/source/index.rst deleted file mode 100644 index 2d702e671..000000000 --- a/docs/en/source/index.rst +++ /dev/null @@ -1,56 +0,0 @@ -.. llmc documentation master file, created by - sphinx-quickstart on Mon Jun 24 10:56:49 2024. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to llmc's documentation! -================================ - -llmc is a tool for large model compression, supporting a variety of models and a variety of compression algorithms. - -github: https://github.com/ModelTC/llmc - -arxiv: https://arxiv.org/abs/2405.06001 - -.. toctree:: - :maxdepth: 2 - :caption: Quick Start - - quickstart.md - - -.. toctree:: - :maxdepth: 2 - :caption: Configs - - configs.md - -.. toctree:: - :maxdepth: 2 - :caption: Advanced - - advanced/model_test_v1.md - advanced/model_test_v2.md - advanced/custom_dataset.md - advanced/Vit_quant&img_dataset.md - advanced/VLM_quant&img-txt_dataset.md - advanced/mix_bits.md - advanced/sparsification.md - advanced/token_reduction.md - -.. toctree:: - :maxdepth: 2 - :caption: Best Practice - - practice/awq.md - practice/awq_omni.md - practice/quarot_gptq.md - -.. toctree:: - :maxdepth: 2 - :caption: Backbend - - backend/vllm.md - backend/sglang.md - backend/autoawq.md - backend/mlcllm.md diff --git a/docs/en/source/practice/awq.md b/docs/en/source/practice/awq.md deleted file mode 100644 index 090e54fda..000000000 --- a/docs/en/source/practice/awq.md +++ /dev/null @@ -1,97 +0,0 @@ - -# AWQ - -## 1.1 Weight-only Quantization - -AWQ performs well in most weight-only quantization scenarios, but it performs poorly in `low-bit` quantization (especially `2-bit`). This is because AWQ uses a `symmetric strategy` for weight clipping regardless of whether symmetric or asymmetric quantization is used. - -In LLMC, we have improved the AWQ method by aligning its `weight clipping` strategy with the `quantization strategy`, such that `asymmetric quantization` uses `asymmetric clipping` and `symmetric quantization` uses `symmetric clipping`, resulting in better performance, especially in low-bit quantization. - -### 1.1.1 Algorithm Configuration - -The specific implementation can be found in the AWQ weight-only quantization [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/methods/Awq/awq_w_only.yml). - -```yaml -# configs/quantization/methods/Awq/awq_w_only.yml -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - special: - trans: True - # The options for "trans_version" include "v1" and "v2". - # But their results don't differ significantly. 
- trans_version: v2 - weight_clip: True -``` - -### 1.1.2 Running the Algorithm - -Simply modify the configuration file path in the [run script](https://github.com/ModelTC/llmc/tree/main/scripts/run_llmc.sh) and execute: - -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=awq_w4a16 -config=${llmc}/configs/quantization/methods/Awq/awq_w_only.yml -``` - -With this improvement, AWQ-LLMC can achieve better accuracy compared to the [original method](https://github.com/mit-han-lab/llm-awq), especially showing significant improvement in 2-bit quantization. - -If `clip_sym` is not specified in `config.quant.special`, its value will default to the same as `config.quant.weight.symmetric`. If you want to reproduce academic-level accuracy, you can add `clip_sym` to the config and set it to `True`: - -```yaml -quant: - special: - clip_sym: True -``` - -## 1.2 Weight-Activation Quantization - -In addition, unlike the original method, AWQ in LLMC supports weight-activation quantization. Compared to [OS+](https://arxiv.org/abs/2304.09145) and [SmoothQuant](https://arxiv.org/abs/2211.10438), which only support scaling transformations for `ln` and `fc` layers, AWQ provides more options for equivalent transformation locations. - -AWQ also uses grid search to find the optimal scaling factor for weight transformations, thus often achieving better results in weight-activation quantization. - -### 1.2.1 Algorithm Configuration - -The specific implementation can be found in the AWQ weight-activation quantization [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/methods/Awq/awq_w_a.yml). - -```yaml -# configs/quantization/methods/Awq/awq_w_a.yml -quant: - method: Awq - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - special: - trans: True - # The options for "trans_version" include "v1" and "v2". - trans_version: v2 - weight_clip: True -``` - -Simply modify the configuration file path in the [run script](https://github.com/ModelTC/llmc/tree/main/scripts/run_llmc.sh) and execute: - -### 1.2.2 Running the Algorithm - -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=awq_w8a8 -config=${llmc}/configs/quantization/methods/Awq/awq_w_a.yml -``` - -In weight-activation quantization, AWQ-LLMC can achieve better results than algorithms like SmoothQuant. diff --git a/docs/en/source/practice/awq_omni.md b/docs/en/source/practice/awq_omni.md deleted file mode 100644 index 92abcbe39..000000000 --- a/docs/en/source/practice/awq_omni.md +++ /dev/null @@ -1,144 +0,0 @@ - -# AWQ + OmniQuant - -OmniQuant uses **Learnable Weight Clipping (LWC)** and **Learnable Equivalent Transformation (LET)** to optimize quantized models, often achieving better performance compared to non-learning-based algorithms. However, due to instability during training and sensitivity to hyperparameters, OmniQuant requires significant time to fine-tune the hyperparameters. This not only increases training costs but can also lead to suboptimal results. - -To address these issues, we have improved OmniQuant in LLMC. We use AWQ to generate `clipping parameters` and `transformation parameters`, which are then used as initializations for OmniQuant's `LWC` and `LET`, respectively. This quality initialization significantly reduces OmniQuant's training time while improving its accuracy. 
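To make the initialization idea concrete, here is a minimal PyTorch sketch of learnable scale and clip parameters seeded from precomputed AWQ factors. The module and tensors are hypothetical stand-ins for illustration, not LLMC's LET/LWC implementation.

```python
import torch
import torch.nn as nn

class LearnableScaleAndClip(nn.Module):
    """Toy LET/LWC-style parameters, initialized from AWQ's searched factors."""

    def __init__(self, awq_scale: torch.Tensor, awq_clip: torch.Tensor):
        super().__init__()
        # Starting from AWQ's grid-searched values instead of a naive init is
        # what shortens the subsequent OmniQuant-style training.
        self.scale = nn.Parameter(awq_scale.clone())  # equivalent-transformation factors (LET)
        self.clip = nn.Parameter(awq_clip.clone())    # weight-clipping bounds (LWC)

    def forward(self, w: torch.Tensor) -> torch.Tensor:
        w = w * self.scale                            # learnable per-channel transformation
        bound = self.clip.abs()
        return torch.clamp(w, -bound, bound)          # learnable clipping

awq_scale = torch.ones(16)            # stand-ins for the factors saved in step one
awq_clip = torch.full((16,), 2.5)
module = LearnableScaleAndClip(awq_scale, awq_clip)
out = module(torch.randn(4, 16))      # both parameters receive gradients when trained
print(out.shape)
```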
- -## 1.1 Weight-only Quantization - -As an example of the `w4a16g128` setting, we provide a [configuration file combining AWQ and OmniQuant](https://github.com/ModelTC/llmc/tree/main/configs/quantization/combination/awq_comb_omni/w4a16g128). - -### 1.1.1 Run AWQ - -**Step One**, run the AWQ-related [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/combination/awq_comb_omni/w4a16g128/step_1_awq.yml). Note that in this step, you need to set the `save_trans` parameter to `True` to save the transformed model. - -```yaml -# configs/quantization/combination/awq_comb_omni/w4a16g128/step_1_awq.yml - -save: - # Save the AWQ-transformed model for OmniQuant. - save_trans: True - save_fake: False - save_path: /path/to/save_awq_trans/ -``` - -Run the script: -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=step_1_awq -config=${llmc}/configs/quantization/combination/awq_comb_omni/w4a16g128/step_1_awq.yml -``` - -### 1.1.2 Run OmniQuant - -**Step Two**, load the AWQ-transformed model and run the OmniQuant-related [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/combination/awq_comb_omni/w4a16g128/step_2_omniq.yml). In this step, set the `search_clip_init` parameter to `True` to initialize `LWC` using the `clipping parameters` generated by AWQ grid search. - -```yaml -# configs/quantization/combination/awq_comb_omni/w4a16g128/step_2_omniq.yml -model: - type: model_type - # Load AWQ-transformed model - path: /path/to/save_awq_trans/transformed_model - torch_dtype: auto -``` - -```yaml -quant: - special: - search_clip_init: True -``` - -Run the script: -```bash -# scripts/run_llmc.sh - -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=step_2_omni -config=${llmc}/configs/quantization/combination/awq_comb_omni/w4a16g128/step_2_omniq.yml -``` - -By running these two steps, LLMC can achieve better results in **weight-only quantization** compared to the original OmniQuant [paper](https://arxiv.org/abs/2308.13137). More importantly, LLMC only requires 5 epochs to achieve this effect, much less than the 20 or 40 epochs required in the [original paper](https://arxiv.org/abs/2308.13137), significantly reducing training time. - -Please note that in **weight-only quantization**, AWQ's `clipping parameters` and `transformation parameters` do not need to be stored for use by OmniQuant. Only a transformed model needs to be saved. This is because Learnable Equivalent Transformation (`LET`) mainly addresses the outlier phenomenon in activation quantization. Therefore, in weight-only quantization, OmniQuant does not need to use `LET`. At the same time, the use of AWQ's `clipping parameters` to initialize Learnable Weight Clipping (`LWC`) is automatically handled by OmniQuant in LLMC. - -## 1.2 Weight-Activation Quantization - -As an example of the `w8a8` setting, we provide a [configuration file combining AWQ and OmniQuant](https://github.com/ModelTC/llmc/tree/main/configs/quantization/combination/awq_comb_omni/w8a8). - -### 1.2.1 Run AWQ - - -**Step One**, run the AWQ-related [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/combination/awq_comb_omni/w8a8/step_1_awq.yml). Note that in this step, you need to set the `save_clip` and `save_scale` parameters to `True` to save the `clipping parameters` and `transformation parameters`. 
Also, make sure to use `learnable` as the weight calibration method since only `learnable` supports saving and loading of the `clipping parameters`. - -```yaml -# configs/quantization/combination/awq_comb_omni/w8a8/step_1_awq.yml -quant: - weight: - bit: 8 - symmetric: False - granularity: per_channel - group_size: -1 - calib_algo: learnable - act: - bit: 8 - symmetric: False - granularity: per_token - calib_algo: minmax -``` - -```yaml -save: - save_scale: True - scale_path: /path/to/scale - save_clip: True - clip_path: /path/to/clip -``` - -Run the script: -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=step_1_awq -config=${llmc}/configs/quantization/combination/awq_comb_omni/w8a8/step_1_awq.yml -``` - -### 1.2.2 Run OmniQuant - -**Step Two**, load the `clipping parameters` and `transformation parameters` generated by AWQ; they are used to initialize OmniQuant's `LWC` and `LET` for training. Run the OmniQuant-related [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/combination/awq_comb_omni/w8a8/step_2_omniq.yml). - -```yaml -# configs/quantization/combination/awq_comb_omni/w8a8/step_2_omniq.yml -quant: - special: - # Use AWQ's search clip factors to initialize OmniQuant's clip factors, - # then refine them through learning (LWC). - search_clip_init: True - load_clip: True - clip_path: /path/to/clip - # Use AWQ's search scale factors to initialize OmniQuant's scale factors, - # then refine them through learning (LET). - search_scale_init: True - scale_path: /path/to/scale -``` - -In this step, set both `search_scale_init` and `search_clip_init` to `True` to use the `clipping parameters` and `transformation parameters` generated by AWQ to initialize `LWC` and `LET`. - -Run the script: -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=step_2_omniq -config=${llmc}/configs/quantization/combination/awq_comb_omni/w8a8/step_2_omniq.yml -``` - -By running these two steps, LLMC can achieve better results in **weight-activation quantization** than those reported in the [original paper](https://arxiv.org/abs/2308.13137), and it only requires 5 epochs. \ No newline at end of file diff --git a/docs/en/source/practice/quarot_gptq.md b/docs/en/source/practice/quarot_gptq.md deleted file mode 100644 index 646467d38..000000000 --- a/docs/en/source/practice/quarot_gptq.md +++ /dev/null @@ -1,68 +0,0 @@ - -# QuaRot + GPTQ - -## 1.1 Weight-Activation Quantization - -QuaRot aims to optimize the quantization performance of large language models by introducing a `rotation matrix` (such as the `Hadamard transform`), enabling efficient weight-activation quantization across all parts of the model (including weights and activations). This technique smooths the distribution of activation values, eliminating `outliers` and simplifying the quantization process. - -However, due to the randomness of the rotation matrix used by QuaRot, the results tend to fluctuate. To address this issue, in LLMC, we can adopt the `QuaRot + GPTQ` combination strategy. By applying GPTQ to reconstruct quantized outputs on the weights transformed by QuaRot, we can fine-tune the weights to stabilize and improve the quantization results. (For detailed analysis, see our [paper](https://arxiv.org/abs/2405.06001v2)). - -Please note that running QuaRot requires support for the **Hadamard transform kernel**.
The installation of this kernel can be referenced in this [repository](https://github.com/spcl/QuaRot). - -### 1.1.1 Running Quarot - -**Step One**, run the QuaRot-related [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/combination/quarot_comb_gptq/w8a8/step_1_quarot.yml). Note that in this step, the `save_trans` parameter must be set to `True` to save the transformed model. - -```yaml -# configs/quantization/combination/quarot_comb_gptq/w8a8/step_1_quarot.yml - -save: - # Save the QuaRot-transformed model. - save_trans: True - save_fake: False - save_path: /path/to/save_quarot_trans_for_gptq/ -``` - -Run the script: -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=step_1_quarot -config=${llmc}/configs/quantization/combination/quarot_comb_gptq/w8a8/step_1_quarot.yml -``` - -### 1.1.2 Running GPTQ - -**Step Two**, load the QuaRot-transformed model and run the GPTQ-related [configuration file](https://github.com/ModelTC/llmc/tree/main/configs/quantization/combination/quarot_comb_gptq/w8a8/step_2_gptq.yml). - -```yaml -# configs/quantization/combination/quarot_comb_gptq/w8a8/step_2_gptq.yml -model: - type: Llama - # Load QuaRot-transformed model - path: /path/to/save_quarot_trans_for_gptq/transformed_model - torch_dtype: auto -``` - -Run the script: -```bash -# scripts/run_llmc.sh - -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=step_2_gptq -config=${llmc}/configs/quantization/combination/quarot_comb_gptq/w8a8/step_2_gptq.yml -``` - -Please note that both QuaRot and GPTQ have an `online_rotate` option. Be sure to keep this option consistent across both configuration files. This option indicates whether to apply online rotation to activations, which can greatly improve accuracy but may hinder practical deployment. For more details on online rotation, please refer to the [original QuaRot paper](https://arxiv.org/abs/2404.00456). - -```yaml -quant: - special: - online_rotate: True -``` - -By following these two steps, LLMC can achieve better results in weight-activation quantization compared to using the QuaRot algorithm alone. diff --git a/docs/en/source/quickstart.md b/docs/en/source/quickstart.md deleted file mode 100644 index 303f26111..000000000 --- a/docs/en/source/quickstart.md +++ /dev/null @@ -1,137 +0,0 @@ - -# Installing LLMC - -``` -git clone https://github.com/ModelTC/llmc.git -cd llmc/ -pip install -r requirements.txt -``` - -# Preparing the Model - -**LLMC** currently supports only `hugging face` format models. For example, you can find the `Qwen2-0.5B` model [here](https://huggingface.co/Qwen/Qwen2-0.5B). Instructions for downloading can be found [here](https://zhuanlan.zhihu.com/p/663712983). - -For users in Mainland China, you can also use the [hugging face mirror](https://hf-mirror.com/). - -An example of a simple download can be: - -``` -pip install -U hf-transfer - -HF_ENDPOINT=https://hf-mirror.com HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download --resume-download Qwen/Qwen2-0.5B --local-dir Qwen2-0.5B -``` - -# Downloading the Dataset - -**LLMC** requires datasets which are categorized into `calibration datasets` and `evaluation datasets`. The `calibration dataset` can be downloaded [here](https://github.com/ModelTC/llmc/blob/main/tools/download_calib_dataset.py) and the `evaluation dataset` can be downloaded [here](https://github.com/ModelTC/llmc/blob/main/tools/download_eval_dataset.py). 
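If you prefer to fetch a dataset yourself rather than use the download scripts, a minimal sketch with the Hugging Face `datasets` library is shown below; the dataset name and local paths are examples only, and any arrow-format dataset saved this way can later be referenced via the config's `path` field with `download: False`.

```python
from datasets import load_dataset, load_from_disk

# Fetch the evaluation set once and keep it on disk in arrow format.
eval_set = load_dataset("wikitext", "wikitext-2-raw-v1")
eval_set.save_to_disk("./datasets/eval/wikitext2")

# Later runs can reload it without network access.
data = load_from_disk("./datasets/eval/wikitext2")
print(data)
```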
- -Additionally, **LLMC** supports downloading datasets online, by setting `download` to True in the `config`. - -```yaml -calib: - name: pileval - download: True -``` - -# Setting Configuration Files - -All `configuration files` can be found [here](https://github.com/ModelTC/llmc/blob/main/configs/), and details on the `configuration files` can be referenced [in this section](https://llmc-en.readthedocs.io/en/latest/configs.html). For example, the SmoothQuant `config` is available [here](https://github.com/ModelTC/llmc/blob/main/configs/quantization/methods/SmoothQuant/smoothquant_w_a.yml). - -```yaml -base: - seed: &seed 42 -model: - type: Qwen2 # Set model name, supporting models like Llama, Qwen2, Llava, Gemma2, etc. - path: # Set the model weight path - torch_dtype: auto -calib: - name: pileval - download: False - path: # Set calibration dataset path - n_samples: 512 - bs: 1 - seq_len: 512 - preproc: pileval_smooth - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: # Set evaluation dataset path - bs: 1 - seq_len: 2048 -quant: - method: SmoothQuant - weight: - bit: 8 - symmetric: True - granularity: per_channel - act: - bit: 8 - symmetric: True - granularity: per_token -save: - save_vllm: True # If set to True, the real quantized integer model is saved for inference with VLLM engine - save_trans: False # If set to True, adjusted floating-point weights will be saved - save_path: ./save -``` - -For more options and details about `save`, please refer to [this section](https://llmc-en.readthedocs.io/en/latest/configs.html). - -**LLMC** provides many [algorithm configuration files](https://github.com/ModelTC/llmc/tree/main/configs/quantization/methods) under the `configs/quantization/methods` path for reference. - -# Running LLMC - -**LLMC** does not require installation; simply modify the `local path` of **LLMC** in the [run script](https://github.com/ModelTC/llmc/blob/main/scripts/run_llmc.sh) as follows: - -```bash -llmc=/path/to/llmc -export PYTHONPATH=$llmc:$PYTHONPATH -``` - -You need to modify the configuration path in the [run script](https://github.com/ModelTC/llmc/blob/main/scripts/run_llmc.sh) according to the algorithm you want to run. For example, `${llmc}/configs/quantization/methods/SmoothQuant/smoothquant_w_a.yml` refers to the SmoothQuant quantization configuration file. `task_name` specifies the name of the `log file` generated by **LLMC** during execution. - -```bash -task_name=smooth_w_a -config=${llmc}/configs/quantization/methods/SmoothQuant/smoothquant_w_a.yml -``` - -Once you have modified the LLMC path and config path in the run script, execute it: - -```bash -bash run_llmc.sh -``` - -# Quantization Inference - -If you have set the option to save `real quantized` models in the configuration file, such as `save_vllm: True`, then the saved `real quantized models` can be directly used for inference with the corresponding `inference backends`. For more details, refer to the `Backend` section of the [documentation](https://llmc-en.readthedocs.io/en/latest). - -# FAQ - -** Q1 ** - -ValueError: Tokenizer class xxx does not exist or is not currently imported. - -** Solution ** - -pip install transformers --upgrade - -** Q2 ** - -If you are running a large model and a single gpu card cannot store the entire model, then the gpu memory will be out during eval. 
- -** Solution ** - -Use per block for inference, turn on inference_per_block, and increase bs appropriately to improve inference speed without exploding the gpu memory. -``` -bs: 10 -inference_per_block: True -``` - -** Q3 ** - -Exception: ./save/transformed_model existed before. Need check. - -** Solution ** - -The saving path is an existing directory and needs to be changed to a non-existing saving directory. diff --git a/docs/img/fewshot_example_gpt3.png b/docs/img/fewshot_example_gpt3.png new file mode 100644 index 000000000..b19973686 Binary files /dev/null and b/docs/img/fewshot_example_gpt3.png differ diff --git a/docs/interface.md b/docs/interface.md new file mode 100644 index 000000000..47cf00b49 --- /dev/null +++ b/docs/interface.md @@ -0,0 +1,166 @@ +# User Guide + +This document details the interface exposed by `lm-eval` and provides details on what flags are available to users. + +## Command-line Interface + +A majority of users run the library by cloning it from Github, installing the package as editable, and running the `python -m lm_eval` script. + +Equivalently, running the library can be done via the `lm-eval` entrypoint at the command line. + +This mode supports a number of command-line arguments, the details of which can be also be seen via running with `-h` or `--help`: + +- `--model` : Selects which model type or provider is evaluated. Must be a string corresponding to the name of the model type/provider being used. See [the main README](https://github.com/EleutherAI/lm-evaluation-harness/tree/main#model-apis-and-inference-servers) for a full list of enabled model names and supported libraries or APIs. + +- `--model_args` : Controls parameters passed to the model constructor. Accepts a string containing comma-separated keyword arguments to the model class of the format `"arg1=val1,arg2=val2,..."`, such as, for example `--model_args pretrained=EleutherAI/pythia-160m,dtype=float32`. For a full list of what keyword arguments, see the initialization of the `lm_eval.api.model.LM` subclass, e.g. [`HFLM`](https://github.com/EleutherAI/lm-evaluation-harness/blob/365fcda9b85bbb6e0572d91976b8daf409164500/lm_eval/models/huggingface.py#L66) + +- `--tasks` : Determines which tasks or task groups are evaluated. Accepts a comma-separated list of task names or task group names. Must be solely comprised of valid tasks/groups. A list of supported tasks can be viewed with `--tasks list`. + +- `--num_fewshot` : Sets the number of few-shot examples to place in context. Must be an integer. + +- `--gen_kwargs` : takes an arg string in same format as `--model_args` and creates a dictionary of keyword arguments. These will be passed to the models for all called `generate_until` (free-form or greedy generation task) tasks, to set options such as the sampling temperature or `top_p` / `top_k`. For a list of what args are supported for each model type, reference the respective library's documentation (for example, the documentation for `transformers.AutoModelForCausalLM.generate()`.) These kwargs will be applied to all `generate_until` tasks called--we do not currently support unique gen_kwargs or batch_size values per task in a single run of the library. To control these on a per-task level, set them in that task's YAML file. + +- `--batch_size` : Sets the batch size used for evaluation. Can be a positive integer or `"auto"` to automatically select the largest batch size that will fit in memory, speeding up evaluation. 
One can pass `--batch_size auto:N` to re-select the maximum batch size `N` times during evaluation. This can help accelerate evaluation further, since `lm-eval` sorts documents in descending order of context length. + +- `--max_batch_size` : Sets the maximum batch size to try to fit in memory, if `--batch_size auto` is passed. + +- `--device` : Sets which device to place the model onto. Must be a string, for example, `"cuda", "cuda:0", "cpu", "mps"`. Defaults to "cuda", and can be ignored if running multi-GPU or running a non-local model type. + +- `--output_path` : A string of the form `dir/file.jsonl` or `dir/`. Provides a path where high-level results will be saved, either into the file named or into the directory named. If `--log_samples` is passed as well, then per-document outputs and metrics will be saved into the directory as well. + +- `--log_samples` : If this flag is passed, then the model's outputs, and the text fed into the model, will be saved at per-document granularity. Must be used with `--output_path`. + +- `--limit` : Accepts an integer, or a float between 0.0 and 1.0 . If passed, will limit the number of documents to evaluate to the first X documents (if an integer) per task or first X% of documents per task. Useful for debugging, especially on costly API models. + +- `--use_cache` : Should be a path where a sqlite db file can be written to. Takes a string of format `/path/to/sqlite_cache_` in order to create a cache db at `/path/to/sqlite_cache_rank{i}.db` for each process (0-NUM_GPUS). This allows results of prior runs to be cached, so that there is no need to re-run results in order to re-score or re-run a given (model, task) pair again. + +- `--cache_requests` : Can be "true", "refresh", or "delete". "true" means that the cache should be used. "refresh" means that you wish to regenerate the cache, which you should run if you change your dataset configuration for a given task. "delete" will delete the cache. Cached files are stored under lm_eval/cache/.cache unless you specify a different path via the environment variable: `LM_HARNESS_CACHE_PATH`. e.g. `LM_HARNESS_CACHE_PATH=~/Documents/cache_for_lm_harness`. + +- `--check_integrity` : If this flag is used, the library tests for each task selected are run to confirm task integrity. + +- `--write_out` : Used for diagnostic purposes to observe the format of task documents passed to a model. If this flag is used, then prints the prompt and gold target string for the first document of each task. + +- `--show_config` : If used, prints the full `lm_eval.api.task.TaskConfig` contents (non-default settings the task YAML file) for each task which was run, at the completion of an evaluation. Useful for when one is modifying a task's configuration YAML locally to transmit the exact configurations used for debugging or for reproducibility purposes. + +- `--include_path` : Accepts a path to a folder. If passed, then all YAML files containing `lm-eval` compatible task configurations will be added to the task registry as available tasks. Used for when one is writing config files for their own task in a folder other than `lm_eval/tasks/`. + +- `--system_instruction`: Specifies a system instruction string to prepend to the prompt. + +- `--apply_chat_template` : This flag specifies whether to apply a chat template to the prompt. It can be used in the following ways: + - `--apply_chat_template` : When used without an argument, applies the only available chat template to the prompt. 
For Hugging Face models, if no dedicated chat template exists, the default chat template will be applied. + - `--apply_chat_template template_name` : If the model has multiple chat templates, apply the specified template to the prompt. + + For Hugging Face models, the default chat template can be found in the [`default_chat_template`](https://github.com/huggingface/transformers/blob/fc35907f95459d7a6c5281dfadd680b6f7b620e3/src/transformers/tokenization_utils_base.py#L1912) property of the Transformers Tokenizer. + +- `--fewshot_as_multiturn` : If this flag is on, the Fewshot examples are treated as a multi-turn conversation. Questions are provided as user content and answers are provided as assistant responses. Requires `--num_fewshot` to be set to be greater than 0, and `--apply_chat_template` to be on. + +- `--predict_only`: Generates the model outputs without computing metrics. Use with `--log_samples` to retrieve decoded results. + +* `--seed`: Set seed for python's random, numpy and torch. Accepts a comma-separated list of 3 values for python's random, numpy, and torch seeds, respectively, or a single integer to set the same seed for all three. The values are either an integer or 'None' to not set the seed. Default is `0,1234,1234` (for backward compatibility). E.g. `--seed 0,None,8` sets `random.seed(0)` and `torch.manual_seed(8)`. Here numpy's seed is not set since the second value is `None`. E.g, `--seed 42` sets all three seeds to 42. + +* `--wandb_args`: Tracks logging to Weights and Biases for evaluation runs and includes args passed to `wandb.init`, such as `project` and `job_type`. Full list [here](https://docs.wandb.ai/ref/python/init). e.g., ```--wandb_args project=test-project,name=test-run``` + +* `--hf_hub_log_args` : Logs evaluation results to Hugging Face Hub. Accepts a string with the arguments separated by commas. Available arguments: + * `hub_results_org` - organization name on Hugging Face Hub, e.g., `EleutherAI`. If not provided, the results will be pushed to the owner of the Hugging Face token, + * `hub_repo_name` - repository name on Hugging Face Hub (deprecated, `details_repo_name` and `results_repo_name` should be used instead), e.g., `lm-eval-results`, + * `details_repo_name` - repository name on Hugging Face Hub to store details, e.g., `lm-eval-results`, + * `results_repo_name` - repository name on Hugging Face Hub to store results, e.g., `lm-eval-results`, + * `push_results_to_hub` - whether to push results to Hugging Face Hub, can be `True` or `False`, + * `push_samples_to_hub` - whether to push samples results to Hugging Face Hub, can be `True` or `False`. Requires `--log_samples` to be set, + * `public_repo` - whether the repository is public, can be `True` or `False`, + * `leaderboard_url` - URL to the leaderboard, e.g., `https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard`. + * `point_of_contact` - Point of contact for the results dataset, e.g., `yourname@example.com`. + * `gated` - whether to gate the details dataset, can be `True` or `False`. + +## External Library Usage + +We also support using the library's external API for use within model training loops or other scripts. + +`lm_eval` supplies two functions for external import and use: `lm_eval.evaluate()` and `lm_eval.simple_evaluate()`. 
+ +`simple_evaluate()` can be used by simply creating an `lm_eval.api.model.LM` subclass that implements the methods described in the [Model Guide](https://github.com/EleutherAI/lm-evaluation-harness/tree/main/docs/model_guide.md), and wrapping your custom model in that class as follows: + +```python +import lm_eval +... + +my_model = initialize_my_model() # create your model (could be running finetuning with some custom modeling code) +... +# instantiate an LM subclass that takes your initialized model and can run +# - `Your_LM.loglikelihood()` +# - `Your_LM.loglikelihood_rolling()` +# - `Your_LM.generate_until()` +lm_obj = Your_LM(model=my_model, batch_size=16) + +# indexes all tasks from the `lm_eval/tasks` subdirectory. +# Alternatively, you can set `TaskManager(include_path="path/to/my/custom/task/configs")` +# to include a set of tasks in a separate directory. +task_manager = lm_eval.tasks.TaskManager() + +# Setting `task_manager` to the one above is optional and should generally be done +# if you want to include tasks from paths other than ones in `lm_eval/tasks`. +# `simple_evaluate` will instantiate its own task_manager if it is set to None here. +results = lm_eval.simple_evaluate( # call simple_evaluate + model=lm_obj, + tasks=["taskname1", "taskname2"], + num_fewshot=0, + task_manager=task_manager, + ... +) +``` + +See the `simple_evaluate()` and `evaluate()` functions in [lm_eval/evaluator.py](../lm_eval/evaluator.py#:~:text=simple_evaluate) for a full description of all arguments available. All keyword arguments to simple_evaluate share the same role as the command-line flags described previously. + +Additionally, the `evaluate()` function offers the core evaluation functionality provided by the library, but without some of the special handling and simplification + abstraction provided by `simple_evaluate()`. + +As a brief example usage of `evaluate()`: + +```python +import lm_eval + +# suppose you've defined a custom lm_eval.api.Task subclass in your own external codebase +from my_tasks import MyTask1 +... + +# create your model (could be running finetuning with some custom modeling code) +my_model = initialize_my_model() +... + +# instantiate an LM subclass that takes your initialized model and can run +# - `Your_LM.loglikelihood()` +# - `Your_LM.loglikelihood_rolling()` +# - `Your_LM.generate_until()` +lm_obj = Your_LM(model=my_model, batch_size=16) + +# optional: the task_manager indexes tasks including ones +# specified by the user through `include_path`. +task_manager = lm_eval.tasks.TaskManager( + include_path="/path/to/custom/yaml" + ) + +# To get a task dict for `evaluate` +task_dict = lm_eval.tasks.get_task_dict( + [ + "mmlu", # A stock task + "my_custom_task", # A custom task + { + "task": ..., # A dict that configures a task + "doc_to_text": ..., + }, + MyTask1 # A task object from `lm_eval.task.Task` + ], + task_manager # A task manager that allows lm_eval to + # load the task during evaluation. + # If none is provided, `get_task_dict` + # will instantiate one itself, but this + # only includes the stock tasks so users + # will need to set this if including + # custom paths is required. + ) + +results = evaluate( + lm=lm_obj, + task_dict=task_dict, + ... 
+) +``` diff --git a/docs/model_guide.md b/docs/model_guide.md new file mode 100644 index 000000000..810801cbf --- /dev/null +++ b/docs/model_guide.md @@ -0,0 +1,191 @@ +# New Model Guide + +This guide may be of special interest to users who are using the library outside of the repository, via installing the library via pypi and calling `lm_eval.evaluator.evaluate()` to evaluate an existing model. + +In order to properly evaluate a given LM, we require implementation of a wrapper class subclassing the `lm_eval.api.model.LM` class, that defines how the Evaluation Harness should interface with your model. This guide walks through how to write this `LM` subclass via adding it to the library! + +## Setup + +To get started contributing, go ahead and fork the main repo, clone it, create a branch with the name of your model, and install the project requirements in your environment: + +```sh +# After forking... +git clone https://github.com//lm-evaluation-harness.git +cd lm-evaluation-harness +git checkout -b +pip install -e ".[dev]" +``` + +Now, we'll create a new file where we'll be adding our model: + +```sh +touch lm_eval/models/.py +``` + +**Tip: this filename should not shadow package names! For example, naming your file `anthropic.py` is disallowed since the API's name on pypi is `anthropic`, but naming it `anthropic_llms.py` works with no problems.** + +## Interface + +All models must subclass the `lm_eval.api.model.LM` class. + +The LM class enforces a common interface via which we can extract responses from a model: + +```python +class MyCustomLM(LM): + #... + def loglikelihood(self, requests: list[Instance]) -> list[tuple[float, bool]]: + #... + + + def loglikelihood_rolling(self, requests: list[Instance]) -> list[tuple[float, bool]]: + #... + + + def generate_until(self, requests: list[Instance]) -> list[str]: + #... + #... +``` +Where `Instance` is a dataclass defined in [`lm_eval.api.instance`](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/api/instance.py) with property `args` of request-dependent type signature described below. + +We support three types of requests, consisting of different interactions / measurements with an autoregressive LM. + +All three request types take as input `requests` of type `list[Instance]` that have a matching `Instance.request_type` to the method name. + +- `generate_until` + - Each request contains `Instance.args : Tuple[str, dict]` containing 1. an input string to the LM and 2. a dictionary of keyword arguments used to control generation parameters. + - Using this input and these generation parameters, text will be sampled from the language model (typically until a maximum output length or specific stopping string sequences--for example, `{"until": ["\n\n", "."], "max_gen_toks": 128}`). + - The generated input+output text from the model will then be returned. + +- `loglikelihood` + - Each request contains `Instance.args : Tuple[str, str]` containing 1. an input string to the LM and 2. a target string on which the loglikelihood of the LM producing this target, conditioned on the input, will be returned. 
+ - Each request will have, as result, `(ll, is_greedy): Tuple[float, int]` returned, where `ll` is a floating point number representing the log probability of generating the target string conditioned on the input, and `is_greedy` being either the value `0` or `1`, with it being `1` if and only if the target string *would be generated by greedy sampling from the LM* (that is, if the target string is the *most likely* N-token string to be output by the LM given the input. ) + +- `loglikelihood_rolling` + - Each request contains `Instance.args : Tuple[str]`, which is an input string to the model whose *entire* loglikelihood, conditioned on purely the EOT token, will be calculated. + - This is used to evaluate *perplexity* on a data distribution. + - It should return `(ll,) : Tuple[float]` , a.k.a. solely the *loglikelihood* of producing each piece of text given no starting input. + + +To allow a model to be evaluated on all types of tasks, you will need to implement these three types of measurements (note that `loglikelihood_rolling` is a special case of `loglikelihood`). For a reference implementation, check out `lm_eval/models/huggingface.py` ! Additionally, check out `lm_eval.api.model.TemplateLM` for a class that abstracts away some commonly used functions across LM subclasses, or see if your model would lend itself well to subclassing the `lm_eval.models.huggingface.HFLM` class and overriding just the initialization or a couple methods! + +**Tip: be careful of indexing in loglikelihood!** + + +LMs take in tokens in position `[0 1 2 ... N]` and output a probability distribution for token position `N+1`. We provide a simplified graphic here, excerpted from `huggingface.py`: + +``` +# how this all works (illustrated on a causal decoder-only setup): +# CTX CONT +# inp 0 1 2 3|4 5 6 7 8 9 <- last token is deleted by inp[:, :-1] +# model \ \ +# logits 1 2 3|4 5 6 7 8 9 <- the ctx half gets tossed out by the +# cont_toks 4 5 6 7 8 9 [:, -len(continuation_enc):, :self.vocab_size] slice +``` + +The final token of the target is not passed into the LM, because we want the LM's predictions *up to but not past* that final target token. For more information, check out https://github.com/EleutherAI/lm-evaluation-harness/issues/942 . + +## Registration + +Congrats on implementing your model! Now it's time to test it out. + +To make your model usable via the command line interface to `lm-eval` using `python -m lm_eval`, you'll need to tell `lm-eval` what your model's name is. + +This is done via a *decorator*, `lm_eval.api.registry.register_model`. Using `register_model()`, one can both tell the package what the model's name(s) to be used are when invoking it with `python -m lm_eval --model ` and alert `lm-eval` to the model's existence. + +```python +from lm_eval.api.registry import register_model + +@register_model("", "") +class MyCustomLM(LM): +``` + +Using this decorator results in the class being added to an accounting of the usable LM types maintained internally to the library at `lm_eval.api.registry.MODEL_REGISTRY`. See `lm_eval.api.registry` for more detail on what sorts of registries and decorators exist in the library! + +**Tip: be sure to import your model in `lm_eval/models/__init__.py!`** + +## Testing + +We also recommend that new model contributions be accompanied by short tests of their 3 core functionalities, at minimum. To see an example of such tests, look at https://github.com/EleutherAI/lm-evaluation-harness/blob/35bdecd379c0cefad6897e67db892f4a6026a128/tests/test_ggml.py . 
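+
+Putting the pieces above together, a minimal skeleton might look like the following. This is only an illustrative sketch: the class name, the registered name, and the placeholder return values are hypothetical (this "model" ignores its inputs entirely), and a real implementation should follow `lm_eval/models/huggingface.py`.
+
+```python
+from typing import List, Tuple
+
+from lm_eval.api.instance import Instance
+from lm_eval.api.model import LM
+from lm_eval.api.registry import register_model
+
+
+@register_model("my-dummy-lm")  # hypothetical name, used as `--model my-dummy-lm`
+class MyDummyLM(LM):
+    def __init__(self, **kwargs) -> None:
+        # Accept (and here, ignore) any `--model_args`; a real model would load
+        # its weights and tokenizer at this point.
+        super().__init__()
+
+    def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
+        # Each `request.args` is `(context, target)`; return one
+        # (loglikelihood, is_greedy) pair per request.
+        return [(-1.0, False) for _ in requests]
+
+    def loglikelihood_rolling(self, requests: List[Instance]) -> List[float]:
+        # Each `request.args` is `(text,)`; return one loglikelihood per request,
+        # as described above.
+        return [-1.0 for _ in requests]
+
+    def generate_until(self, requests: List[Instance]) -> List[str]:
+        # Each `request.args` is `(context, gen_kwargs)`; return one generated
+        # string per request.
+        return ["placeholder output" for _ in requests]
+```
+
+With the corresponding import added to `lm_eval/models/__init__.py`, a class like this should then be selectable from the command line via `--model my-dummy-lm`.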
+ +## Chat Templating + +Many models are fine-tuned with a [Chat Template](https://huggingface.co/docs/transformers/main/en/chat_templating) in order to enable back-and-forth interaction between a "User"'s queries and the model (often called "Assistant")'s responses. It can be desirable to evaluate fine-tuned models on evaluation tasks while wrapped in the conversational format they expect. + +In order to make your model optionally compatible with a chat format, three additional methods must be implemented: + +```python +class MyCustomLM(LM): + #... + @property + def tokenizer_name(self) -> str: + """ + Return the name of the model's tokenizer and/or the accompanying chat template. + The returned string is used to cache requests. + + Returns: + str: The name of the model's tokenizer and/or chat template. + """ + + def chat_template(self, chat_template: Union[bool, str] = False) -> str: + """ + Get the appropriate chat template for the model based on the `chat_template` argument. + + This method returns the chat template string to build the prompt from a chat history. + The chat template is saved in the evaluation results for reproducibility. + Boolean arguments should be used with models that have only one chat template, + while string arguments are used with models that have multiple chat templates. + For the reference implementation, see HFLM class in `lm_eval.models.huggingface`. + + Args: + chat_template (Union[bool, str]): Specifies whether to apply a chat template: + - If False: Do not apply any chat template. + - If True: Apply the default chat template. + - If str: Apply the specified chat template by name. + + Returns: + str: The selected chat template in Jinja format. + """ + + def apply_chat_template(self, chat_history: List[Dict[str, str]]) -> str: + """ + Process a chat history to create a string that can be tokenized and input into the model. + + Args: + chat_history (List[Dict[str, str]]): A list of dictionaries representing the chat history, + where each dictionary has "role" and "content" keys. + + Returns: + str: A string representing the chat history that can be tokenized and fed into the model. + """ +``` + +- `apply_chat_template` + - This method performs the bulk of the work required for chat-formatting. + - As input, a `chat_history: List[Dict[str, str]]` is passed in. This is a transcript of a conversation of a form similar to + ``` + [ + {"system": }, + {"user": } + {"assistant": }, + # ... more few-shot examples, potentially + {"user": }, + ] + ``` + which can then be converted into a string input. + - The output is a string representing this conversation that can be fed into the model. + - For example, this consists of simply calling `tokenizer.apply_chat_template` for HFLM--see the implementation there for reference. +- `tokenizer_name` + - LM Eval Harness supports [caching requests](https://github.com/EleutherAI/lm-evaluation-harness/blob/4902aaaf1f374682f95ac25fe2e13b23faddc91a/lm_eval/__main__.py#L140) that are sent to a model, for faster setup when repeating an already-performed evaluation. + - However, we don't want to use the cache of chat transcripts rendered using one chat template or system prompt to send to a model with a different template! So, we use this `lm.tokenizer_name` string to distinguish caches for a given model (and chat template) from one another. +- `chat_template` + - Chat templates are typically provided as a Jinja template string or a string formatted with str.format to include user and assistant messages in a single prompt. 
This template string is saved in the evaluation results to ensure reproducibility. + +If not implemented for a given model type, the flags `--apply_chat_template` , `--fewshot_as_multiturn`, and `--system_instruction` cannot be used. + +## Other + +**Pro tip**: In order to make the Evaluation Harness overestimate total runtimes rather than underestimate it, HuggingFace models come in-built with the ability to provide responses on data points in *descending order by total input length* via `lm_eval.utils.Reorderer`. Take a look at `lm_eval.models.hf_causal.HFLM` to see how this is done, and see if you can implement it in your own model! + +## Conclusion + +After reading this guide, you should be able to add new model APIs or implementations to the Eval Harness library! diff --git a/docs/new_task_guide.md b/docs/new_task_guide.md new file mode 100644 index 000000000..e9bd2becb --- /dev/null +++ b/docs/new_task_guide.md @@ -0,0 +1,492 @@ +# New Task Guide + +`lm-evaluation-harness` is a framework that strives to support a wide range of zero- and few-shot evaluation tasks on autoregressive language models (LMs). + +This documentation page provides a walkthrough to get started creating your own task, in `lm-eval` versions v0.4.0 and later. + +A more interactive tutorial is available as a Jupyter notebook [here](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/examples/lm-eval-overview.ipynb). + +## Setup + +If you haven't already, go ahead and fork the main repo, clone it, create a branch with the name of your task, and install the project requirements in your environment: + +```sh +# After forking... +git clone https://github.com//lm-evaluation-harness.git +cd lm-evaluation-harness +git checkout -b +pip install -e ".[dev]" +``` + +In this document, we'll walk through the basics of implementing a static benchmark evaluation in two formats: a *generative* task which requires sampling text from a model, such as [`gsm8k`](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/tasks/gsm8k/gsm8k.yaml), and a *discriminative*, or *multiple choice*, task where the model picks the most likely of several fixed answer choices, such as [`sciq`](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/tasks/sciq/sciq.yaml). + +## Creating a YAML file + +To implement a new standard task, we'll need to write a YAML file which configures our task logic. We start by making a new empty YAML file. This file can have any name, but we recommend placing it in a subfolder of `lm_eval/tasks` titled by the dataset or task's shorthand name: for example, + +```sh +touch lm_eval/tasks//.yaml +``` +Or, copy the template subfolder we provide from `templates/new_yaml_task`: +```sh +cp -r templates/new_yaml_task lm_eval/tasks/ +``` +and rename the folders and YAML file(s) as desired. + +### Selecting and configuring a dataset + +All data downloading and management is handled through the HuggingFace (**HF**) [`datasets`](https://github.com/huggingface/datasets) API. So, the first thing you should do is check to see if your task's dataset is already provided in their catalog [here](https://huggingface.co/datasets). If it's not in there, please consider adding it to their Hub to make it accessible to a wider user base by following their [new dataset guide](https://github.com/huggingface/datasets/blob/main/ADD_NEW_DATASET.md) +. + +Once you have a HuggingFace dataset prepared for your task, we want to assign our new YAML to use this dataset: + +```yaml +dataset_path: ... 
# the name of the dataset on the HF Hub. +dataset_name: ... # the dataset configuration to use. Leave `null` if your dataset does not require a config to be passed. See https://huggingface.co/docs/datasets/load_hub#configurations for more info. +dataset_kwargs: null # any extra keyword arguments that should be passed to the dataset constructor, e.g. `data_dir`. +``` + +Next, we'd like to tell our task what the dataset's train, validation, and test splits are named, if they exist: + +```yaml +training_split: +validation_split: +test_split: +``` +Tests will run on the `test_split` if it is available, and otherwise evaluate on the `validation_split`. + +We can also specify from which split the task should retrieve few-shot examples via: +```yaml +fewshot_split: +``` +or by hardcoding them, either using the following in the yaml file: +```yaml +fewshot_config: + sampler: first_n + samples: [ + {}, + {}, + ] +``` +or by adding the function `list_fewshot_samples` in the associated utils.py file: +```python +def list_fewshot_samples() -> list[dict]: + return [{}, {}] +``` +See `lm_eval/tasks/minerva_math/minerva_math_algebra.yaml` for an example of the latter, and `lm_eval/tasks/gsm8k/gsm8k-cot.yaml` for an example of the former. + +In this case, each sample must contain the same fields as the samples in the above sets--for example, if `doc_to_text` expects an `input` field when rendering input prompts, these provided samples must include an `input` key. + +If neither above options are not set, we will default to train/validation/test sets, in that order. + + +Finally, our dataset may not be already in the exact format we want. Maybe we have to strip whitespace and special characters via a regex from our dataset's "question" field! Or maybe we just want to rename its columns to match a convention we'll be using for our prompts. + +Let's create a python file in the directory where we're writing our YAML file: +```bash +touch lm_eval/tasks//utils.py +``` +Now, in `utils.py` we'll write a function to process each split of our dataset: + +TODO: Change the example to one that's in the tasks/ + +```python +def process_docs(dataset: datasets.Dataset): + def _helper(doc): + # modifies the contents of a single + # document in our dataset. + doc["choices"] = [doc["choice1"], doc["choice2"], doc["wrong_answer"]] + doc["gold"] = doc["label"] + return doc + + return dataset.map(_helper) # returns back a datasets.Dataset object +``` + +Now, in our YAML config file we'll use the `!function` constructor, and tell the config where our imported Python function will come from. At runtime, before doing anything else we will preprocess our dataset according to this function! +```yaml +process_docs: !function utils.process_docs +``` + +### Using Local Datasets + +To load a local dataset for evaluation, you can specify data files in the `dataset_kwargs` field, such as the following for JSON files: + +``` +dataset_path: json +dataset_name: null +dataset_kwargs: + data_files: /path/to/my/json +``` +Or with files already split into separate directories: + +``` +dataset_path: arrow +dataset_kwargs: + data_files: + train: /path/to/arrow/train/data-00000-of-00001.arrow + validation: /path/to/arrow/validation/data-00000-of-00001.arrow +``` + +Alternatively, if you have previously downloaded a dataset from huggingface hub (using `save_to_disk()`) and wish to use the local files, you will need to use `data_dir` under `dataset_kwargs` to point to where the directory is. 
+ +``` +dataset_path: hellaswag +dataset_kwargs: + data_dir: hellaswag_local/ +``` + +You can also set `dataset_path` as a directory path in your local system. This will assume that there is a loading script with the same name as the directory. [See datasets docs](https://huggingface.co/docs/datasets/loading#local-loading-script). + +## Writing a Prompt Template + +The next thing we need to do is decide what format to use when presenting the data to the LM. This is our **prompt**, where we'll define both an input and output format. + +To write a prompt, users will use `doc_to_text`, `doc_to_target`, and `doc_to_choice` (Optional when certain conditions are met). + +`doc_to_text` defines the input string a model will be given while `doc_to_target` and `doc_to_choice` will be used to generate the target text. `doc_to_target` can be either a text string that refers to the target string or an integer that refers to the index of the correct label. When it is set as an index, `doc_to_choice` must be also be set with the appropriate list of possible choice strings. + +### Basic prompts + +If a dataset is straightforward enough, users can enter the feature name directly. This assumes that no preprocessing is required. For example in [Swag](https://github.com/EleutherAI/lm-evaluation-harness/blob/1710b42d52d0f327cb0eb3cb1bfbbeca992836ca/lm_eval/tasks/swag/swag.yaml#L10-L11), `doc_to_text` and `doc_to_target` given the name of one of the feature each. +```yaml +doc_to_text: startphrase +doc_to_target: label +``` +Hard-coding is also possible as is the case in [SciQ](https://github.com/EleutherAI/lm-evaluation-harness/blob/1710b42d52d0f327cb0eb3cb1bfbbeca992836ca/lm_eval/tasks/sciq/sciq.yaml#L11). +```yaml +doc_to_target: 3 +``` +`doc_to_choice` can be directly given a list of text as option (See [Toxigen](https://github.com/EleutherAI/lm-evaluation-harness/blob/1710b42d52d0f327cb0eb3cb1bfbbeca992836ca/lm_eval/tasks/toxigen/toxigen.yaml#L11)) +```yaml +doc_to_choice: ['No', 'Yes'] +``` + +if a dataset feature is already a list, you can set the name of the feature as `doc_to_choice` (See [Hellaswag](https://github.com/EleutherAI/lm-evaluation-harness/blob/e0eda4d3ffa10e5f65e0976161cd134bec61983a/lm_eval/tasks/hellaswag/hellaswag.yaml#L13)) +``` +doc_to_choice: choices +``` + + + +### Writing a prompt with Jinja 2 + +We support the [Jinja 2](https://jinja.palletsprojects.com/en/3.1.x/) templating language for writing prompts. In practice, this means you can take your dataset's columns and do many basic string manipulations to place each document into prompted format. + +Take for example the dataset `super_glue/boolq`. As input, we'd like to use the features `passage` and `question` and string them together so that for a a sample line `doc`, the model sees something the format of: +``` +doc["passage"] +Question: doc["question"]? +Answer: +``` +We do this by [writing](https://github.com/EleutherAI/lm-evaluation-harness/blob/1710b42d52d0f327cb0eb3cb1bfbbeca992836ca/lm_eval/tasks/super_glue/boolq/default.yaml#L9C1-L9C61) +```yaml +doc_to_text: "{{passage}}\nQuestion: {{question}}?\nAnswer:" +``` +Such that `{{passage}}` will be replaced by `doc["passage"]` and `{{question}}` with `doc["question"]` when rendering the prompt template. + +Our intended output is for the model to predict a single whitespace, and then the answer to the question. 
We do this via:
+```yaml
+doc_to_target: "{{answer}}"
+```
+
+
+**Important**: we now add `target_delimiter` between input and target, which defaults to " ", such that the full input-output string is `doc_to_text(doc) + target_delimiter + doc_to_target(doc)`. `doc_to_text` and `doc_to_target` should not contain trailing right or left whitespace, respectively.
+
+
+#### Multiple choice format
+
+For tasks which are multiple choice (a fixed, finite set of label words per document) and evaluated via comparing loglikelihoods of all label words (the `multiple_choice` task output type), we enforce a particular convention on prompt format.
+
+An annotated example in the case of SciQ is as follows:
+
+```yaml
+doc_to_text: "{{support.lstrip()}}\nQuestion: {{question}}\nAnswer:" # This is the input portion of the prompt for this doc. It will have " {{choice}}" appended to it as target for each choice in answer_choices.
+doc_to_target: 3 # this contains the index into the answer choice list of the correct answer.
+doc_to_choice: "{{[distractor1, distractor2, distractor3, correct_answer]}}"
+```
+Task implementers are thus able to decide what the answer choices should be for a document, and what prompt format to use.
+
+The label index can also be sourced from a feature directly. For example, in `superglue/boolq`, the label index is defined in the feature `label`. We can set `doc_to_target` to simply `label`. The options or verbalizers can be written in the form of a list `["no", "yes"]` that will correspond to the label index.
+
+```yaml
+doc_to_text: "{{passage}}\nQuestion: {{question}}?\nAnswer:"
+doc_to_target: label
+doc_to_choice: ["no", "yes"]
+```
+
+### Using Python Functions for Prompts
+
+There may be cases where the prompt we want to implement is more easily expressed in Python than in Jinja 2. For this, we can use Python helper functions that are defined in the YAML config. Note that the function script must be in the same directory as the yaml.
+
+A good example is WikiText, which requires a lot of regex rules to clean the samples.
+```python
+import re
+
+def wikitext_detokenizer(doc):
+    string = doc["page"]
+    # contractions
+    string = string.replace("s '", "s'")
+    string = re.sub(r"/' [0-9]/", r"/'[0-9]/", string)
+    ...
+    string = string.replace(" 's", "'s")
+
+    return string
+```
+
+We can load this function in `doc_to_target` by using the `!function` operator after `doc_to_target`, followed by `<filename>.<function name>`. In the file [wikitext.yaml](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/tasks/wikitext/wikitext.yaml) we write:
+```
+doc_to_target: !function preprocess_wikitext.wikitext_detokenizer
+```
+
+### Importing a Prompt from Promptsource
+
+[Promptsource](https://github.com/bigscience-workshop/promptsource/tree/main/promptsource) is a great repository for crowdsourced prompts for many datasets. We can load these prompts easily by using the `use_prompt` argument and filling it with the format `"promptsource:<name of prompt template>"`. To use this, `doc_to_text` and `doc_to_target` should be left undefined. This will fetch the template of the dataset defined in the YAML file.
+
+For example, for SuperGLUE BoolQ, if we want to use the prompt template `GPT-3 Style`, we can add this to the YAML file:
+```
+use_prompt: "promptsource:GPT-3 Style"
+```
+
+If you would like to run evaluation on all prompt templates, you can simply call it this way:
+```
+use_prompt: "promptsource:*"
+```
+
+### Setting metrics
+
+You're almost done! Now we need to choose how to score our task.
+- *If this is a multiple choice task:* do you just want to check your model's accuracy in choosing the correct answer choice? +- *If this is a generation task:* do you just want to check how often your model outputs *exactly the ground-truth output string provided*? + + +If the answer to the above is no: you'll need to record what scoring metrics to use! Metrics can be listed in the following format: + +```yaml +metric_list: + - metric: + aggregation: + higher_is_better: + - metric: !function script.function + aggregation: ... + higher_is_better: ... +``` +`aggregation` and `higher_is_better` can optionally be left out to default to the manually-set defaults if using a natively supported metric, otherwise it must be defined explicitly (for example, when using a custom metric implemented as a function). + +For a full list of natively supported metrics and aggregation functions see [`docs/task_guide.md`](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/task_guide.md). All metrics supported in [HuggingFace Evaluate](https://github.com/huggingface/evaluate/tree/main/metrics) can also be used, and will be loaded if a given metric name is not one natively supported in `lm-eval` or `hf_evaluate` is set to `true`. + +### Optional, More Advanced Setup + +Some tasks may require more advanced processing logic than is described in this guide. + +As a heuristic check: +* Does your task require generating multiple free-form outputs per input document? +* Does your task require complex, multi-step post-processing of generated model outputs? +* Does your task require subsetting documents on the fly based on their content? +* Do you expect to compute metrics after applying multiple such processing steps on your model outputs? +* Does your task rely on metrics that need a custom implementation? + +For more detail on the task system and advanced features, see [`docs/task_guide.md`](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/task_guide.md) . If none of the above sound like they apply to your task, it's time to continue onto checking your task performance! + +### Task name + tags (registering a task) + +To test a task conveniently, it helps to *register* the task--that is, to give it a name and make the `lm-eval` library aware it exists! + +If you're writing your YAML file inside the `lm_eval/tasks` folder, you just need to give your task a name! You can do this inside your YAML file: + +```yaml +task: +``` +Including a task name is mandatory. + +It is often also convenient to label your task with several `tag` values, though this field is optional: + +```yaml +tag: + - tag1 + - tag2 +``` +This will add your task to the `tag1` and `tag2` tags, enabling people to know how to categorize your task, and if desired run all tasks in one of these groups at once, your task along with them. + + +If your task is not in the `lm_eval/tasks` folder, you'll need to tell the Eval Harness where to look for YAML files. + +You can do this via the `--include_path` argument in `__main__.py`. This command will be used to initialize the `TaskManager` object which you can also use for your custom scripts. + +```python +task_manager = TaskManager(args.verbosity, include_path=args.include_path) +``` + +Passing `--tasks /path/to/yaml/file` is also accepted. + + +### Advanced Group Configs + +While `tag` values are helpful when you want to be able to quickly and conveniently run a set of related tasks via `--tasks my_tag_name`, often, we wish to implement more complex logic. 
For example, the MMLU benchmark contains 57 *subtasks* that must all be *averaged* together in order to report a final 'MMLU score'.
+
+Groupings of tasks might also use particular variants of a task--for example, we might want to default to evaluating a task as 5-shot when called as part of a given grouping, but not have a preference for number of shots when evaluating it as a standalone.
+
+We implement this via **groups**, which are distinct from tags. Groups can be implemented via *group config* YAML files, which are laid out much like tasks' YAML configs, but with a few differences.
+
+The most basic form of group can be defined via a YAML config similar to the following:
+
+```yaml
+group: nli_tasks
+task:
+  - cb
+  - anli_r1
+  - rte
+metadata:
+  version: 1.0
+```
+
+This will behave almost identically to a `tag` that includes these 3 tasks, but with one key distinction: we'll print the `nli_tasks` group as a row (with no associated metrics) in our table of outputs, and visually show that these 3 tasks appear under its subheader.
+
+
+Now, let's assume we actually want to report an aggregate score for `nli_tasks`. We would instead use a YAML config like the following:
+
+```yaml
+group: nli_tasks
+task:
+  - cb
+  - anli_r1
+  - rte
+aggregate_metric_list:
+  - metric: acc
+    aggregation: mean
+    weight_by_size: true # defaults to `true`. Set this to `false` to do a "macro" average (taking each subtask's average accuracy, and summing those accuracies and dividing by 3)--by default we do a "micro" average (retain all subtasks' per-document accuracies, and take the mean over all documents' accuracies to get our aggregate mean).
+metadata:
+  version: 1.0
+```
+
+Similar to our `metric_list` for listing out the metrics we want to calculate for a given task, we use an `aggregate_metric_list` field to specify which metric name to aggregate across subtasks, what aggregation function to use, and whether we should micro- or macro-average these metrics. See [./task_guide.md](./task_guide.md) for a full list of related sub-keys.
+
+**[!Tip]: currently, we primarily support aggregating group metrics that use `mean` (either micro- or macro-averaged) over their subtasks. If you require more complex aggregation rules, you may want to perform aggregation offline.**
+
+Group configs can be fairly complex! We can do various operations, such as defining new subtask(s) inline in our group YAML, overriding an existing task's specific config value, or nesting existing groups within our new group.
+
+For example, let's build a config for evaluating MMLU and a few natural language inference tasks. For MMLU, we can write the name of the benchmark as a subtask under `task`. You can configure parameters such as `num_fewshot`. If the task being configured is a group such as `mmlu` or `super_glue`, the parameter set will be applied to all of the subtasks.
+
+```yaml
+group: nli_and_mmlu
+task:
+  - group: nli_tasks
+    task:
+      - cb
+      - anli_r1
+      - rte
+    aggregate_metric_list:
+      - metric: acc
+        aggregation: mean
+        higher_is_better: true
+  - task: mmlu
+    num_fewshot: 2
+```
+
+### Configuring python classes
+
+There can be occasions when yaml-based tasks cannot accommodate how a task is handled. LM-Eval supports manually implementing tasks, as was previously done before `0.4.x`. To register the task, you can simply make a yaml with the name of the task in `task` and the class object in `class` using the `!function` prefix.
+ +```yaml +task: squadv2 +class: !function task.SQuAD2 +``` + +This also applies to building group configurations with subtasks that are python classes. + +```yaml +group: scrolls +task: + - task: scrolls_qasper + class: !function task.Qasper + - task: scrolls_quality + class: !function task.QuALITY + - task: scrolls_narrativeqa + class: !function task.NarrativeQA + ... +``` + +You can also pass a custom argument to your class by accepting `config` in the custom class constructor. +Here's how to do it: + +```yaml +task: 20_newsgroups +class: !function task.Unitxt +recipe: card=cards.20_newsgroups,template=templates.classification.multi_class.title +``` + +In this example, `recipe` is the custom argument for the `Unitxt` class. + +## Beautifying Table Display + +To avoid conflict, each task needs to be registered with a unique name. Because of this, slight variations of task are still counted as unique tasks and need to be named uniquely. This could be done by appending an additional naming that may refer to the variation such as in MMLU where the template used to evaluated for flan are differentiated from the default by the prefix `mmlu_flan_*`. Printing the full task names can easily clutter the results table at the end of the evaluation especially when you have a long list of tasks or are using a benchmark that comprises of many tasks. To make it more legible, you can use `task_alias` and `group_alias` to provide an alternative task name and group name that will be printed. For example in `mmlu_abstract_algebra.yaml` we set `task_alias` to `abstract_algebra`. In group configs, a `group_alias` for a group can also be set. + +``` +"dataset_name": "abstract_algebra" +"description": "The following are multiple choice questions (with answers) about abstract\ + \ algebra.\n\n" +"include": "_default_template_yaml" +"task": "mmlu_abstract_algebra" +"task_alias": "abstract_algebra" +``` + +## Checking validity + +After registering your task, you can now check on your data downloading and verify that the few-shot samples look as intended. Run the following command with your desired args: + +```bash +python -m scripts.write_out \ + --output_base_path \ + --tasks \ + --sets \ + --num_fewshot K \ + --num_examples N \ +``` + +Open the file specified at the `--output_base_path ` and ensure it passes +a simple eye test. + +## Versioning + +One key feature in LM Evaluation Harness is the ability to version tasks and groups--that is, mark them with a specific version number that can be bumped whenever a breaking change is made. + +This version info can be provided by adding the following to your new task or group config file: + +``` +metadata: + version: 0 +``` + +Now, whenever a change needs to be made to your task in the future, please increase the version number by 1 so that users can differentiate the different task iterations and versions. + +If you are incrementing a task's version, please also consider adding a changelog to the task's README.md noting the date, PR number, what version you have updated to, and a one-liner describing the change. + +for example, + +* \[Dec 25, 2023\] (PR #999) Version 0.0 -> 1.0: Fixed a bug with answer extraction that led to underestimated performance. + +## Checking performance + equivalence + +It's now time to check models' performance on your task! 
In the evaluation harness, we intend to support a wide range of evaluation tasks and setups, but prioritize the inclusion of already-proven benchmarks following the precise evaluation setups in the literature where possible. + +To enable this, we provide a checklist that should be completed when contributing a new task, to enable accurate book-keeping and to ensure that tasks added to the library are well-tested and, where applicable, precedented. + +### Task Validity Checklist + +The checklist is the following: + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? + +It is recommended to include a filled-out copy of this checklist in the README.md for the subfolder you are creating, if you have created a new subfolder in `lm_eval/tasks`. + +**Finally, please add a short description of your task(s), along with a link to its subfolder in lm_eval/tasks , to [`lm_eval/tasks/README.md`](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/tasks/README.md) so that users can discover your task in the library, and follow the link to your README for more information about the variants supported, their task names, and the original source of the dataset and/or evaluation setup.** + +## Submitting your task + +You're all set! Now push your work and make a pull request to the `main` branch! Thanks for the contribution :). If there are any questions, please leave a message in the `#lm-thunderdome` channel on the EAI discord! diff --git a/docs/task_guide.md b/docs/task_guide.md new file mode 100644 index 000000000..34e47c413 --- /dev/null +++ b/docs/task_guide.md @@ -0,0 +1,317 @@ +# Task Configuration + +The `lm-evaluation-harness` is meant to be an extensible and flexible framework within which many different evaluation tasks can be defined. All tasks in the new version of the harness are built around a YAML configuration file format. + +These YAML configuration files, along with the current codebase commit hash, are intended to be shareable such that providing the YAML config enables another researcher to precisely replicate the evaluation setup used by another, in the case that the prompt or setup differs from standard `lm-eval` task implementations. + +While adding a standard evaluation task on a new dataset can be occasionally as simple as swapping out a Hugging Face dataset path in an existing file, more specialized evaluation setups also exist. Here we'll provide a crash course on the more advanced logic implementable in YAML form available to users. + +If your intended task relies on features beyond what are described in this guide, we'd love to hear about it! Feel free to open an issue describing the scenario on Github, create a PR to the project with a proposed implementation, or ask in the `#lm-thunderdome` channel on the EleutherAI discord. + +## Configurations + +Tasks are configured via the `TaskConfig` object. 
Below, we describe all fields usable within the object, and their role in defining a task. + +### Parameters + +Task naming + registration: +- **task** (`str`, defaults to None) — name of the task. +- **task_alias** (`str`, defaults to None) - Alias of the task name that will be printed in the final table results. +- **tag** (`str`, *optional*) — name of the task tags(s) a task belongs to. Enables one to run all tasks with a specified tag name at once. + +Dataset configuration options: +- **dataset_path** (`str`) — The name of the dataset as listed by HF in the datasets Hub. +- **dataset_name** (`str`, *optional*, defaults to None) — The name of what HF calls a “data instance” or sub-task of the benchmark. If your task does not contain any data instances, just leave this to default to None. (If you're familiar with the HF `datasets.load_dataset` function, these are just the first 2 arguments to it.) +- **dataset_kwargs** (`dict`, *optional*) — Auxiliary arguments that `datasets.load_dataset` accepts. This can be used to specify arguments such as `data_files` or `data_dir` if you want to use local datafiles such as json or csv. +- **training_split** (`str`, *optional*) — Split in the dataset to use as the training split. +- **validation_split** (`str`, *optional*) — Split in the dataset to use as the validation split. +- **test_split** (`str`, *optional*) — Split in the dataset to use as the test split. +- **fewshot_split** (`str`, *optional*) — Split in the dataset to draw few-shot exemplars from. assert that this not None if num_fewshot > 0. +- **process_docs** (`Callable`, *optional*) — Optionally define a function to apply to each HF dataset split, to preprocess all documents before being fed into prompt template rendering or other evaluation steps. Can be used to rename dataset columns, or to process documents into a format closer to the expected format expected by a prompt template. + +Prompting / in-context formatting options: +- **use_prompt** (`str`, *optional*) — Name of prompt in promptsource to use. if defined, will overwrite doc_to_text, doc_to_target, and doc_to_choice. +- **description** (`str`, *optional*) — An optional prepended Jinja2 template or string which will be prepended to the few-shot examples passed into the model, often describing the task or providing instructions to a model, such as `"The following are questions (with answers) about {{subject}}.\n\n"`. No delimiters or spacing are inserted between the description and the first few-shot example. +- **doc_to_text** (`Union[Callable, str]`, *optional*) — Jinja2 template, string, or function to process a sample into the appropriate input for the model. +- **doc_to_target** (`Union[Callable, str]`, *optional*) — Jinja2 template, string, or function to process a sample into the appropriate target output for the model. For multiple choice tasks, this should return an index into the answer choice list of the correct answer. +- **doc_to_choice** (`Union[Callable, str]`, *optional*) — Jinja2 template, string, or function to process a sample into a list of possible string choices for `multiple_choice` tasks. Left undefined for `generate_until` tasks. +- **fewshot_delimiter** (`str`, *optional*, defaults to "\n\n") — String to insert between few-shot examples. +- **target_delimiter** (`str`, *optional*, defaults to `" "`) — String to insert between input and target output for the datapoint being tested. 
+ +Runtime configuration options: +- **num_fewshot** (`int`, *optional*, defaults to 0) — Number of few-shot examples before the input. +- **batch_size** (`int`, *optional*, defaults to 1) — Batch size. + +Scoring details: +- **metric_list** (`str`, *optional*, defaults to None) — A list of metrics to use for evaluation. See docs for expected format. +- **output_type** (`str`, *optional*, defaults to "generate_until") — Selects the type of model output for the given task. Options are `generate_until`, `loglikelihood`, `loglikelihood_rolling`, and `multiple_choice`. +- **generation_kwargs** (`dict`, *optional*) — Auxiliary arguments for the `generate` function from HF transformers library. Advanced keyword arguments may not be supported for non-HF LM classes. +- **repeats** (`int`, *optional*, defaults to 1) — Number of repeated runs through model for each sample. can be used for cases such as self-consistency. +- **filter_list** (`Union[str, list]`, *optional*) — List of filters to postprocess model outputs. See below for further detail on the filter API. +- **should_decontaminate** (`bool`, *optional*, defaults to False) - Whether to decontaminate or not. +- **doc_to_decontamination_query** (`str`, *optional*) — Query for decontamination if `should_decontaminate` is True. If `should_decontaminate` is True but `doc_to_decontamination_query` is `None`, `doc_to_decontamination_query` will follow `doc_to_text`. + +Other: +- **metadata** (`dict`, *optional*) — An optional field where arbitrary metadata can be passed. Most tasks should include a `version` key in this field that is used to denote the version of the yaml config. Other special metadata keys are: `num_fewshot`, to override the printed `n-shot` table column for a task. + +## Filters + +A key component of the `lm-evaluation-harness` library is the `Filter` object. In a typical evaluation run of the harness, we take the formatted inputs and run them through our LM, with the appropriate output type (greedy or free-form generation, or loglikelihood-based comparative scoring). + +After getting scores or output text from our LM on each `Instance` or document in the dataset, we then need to feed these responses into a metric or scoring function to return scores to a user. + +However, certain tasks may require more complex behavior than directly turning over model outputs to a metric function. For example, we may want to post-process our output text by truncating it or extracting a model's answer, we may want to ensemble over multiple "takes" on a different document, et cetera. + +**Detailed Aside**: +We do such post-processing by operating on *responses*, which are stored after running an LM on an `Instance` from the task in `Instance.resps`. + +`resps` is a `List[str]` for each instance, and we pass a `List[List[]]` to our filters that is a list of `[instance.resps for instance in instances]`. + +Our filters, after completing a pipeline, must return a `List[]` which we then unpack and store each element of in `Instance.filtered_resps` for the corresponding instance. Thus, we take as input a list of returns from our model for each doc, and must return a return from our model *without it being wrapped in a list* for each doc. + +**End Aside** + + +A full list of supported filter operations can be found in `lm_eval/filters/__init__.py`. Contributions of new filter types are welcome! + +### Multiple Filter Pipelines + +Tasks need not be limited to a single filter pipeline. 
We enable users to run multiple, distinct, filter pipelines on *the same model outputs* generated in one run on a task. + +As a case study, let's look at an implementation of solving the Gsm8k math word problem benchmark in `lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml`. Here, we are emulating the setup used by [Self-Consistency Improves Chain of Thought Prompting](https://arxiv.org/abs/2203.11171), in which evaluation is performed by generating N chain-of-thought outputs from a model via temperature-based sampling, then selecting the answers output by the model at the end of the chains of thought, then majority voting across all those numeric answers. + +Within our YAML file: + +```yaml +... +repeats: 64 +filter_list: + - name: "score-first" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)" + - function: "take_first" + - name: "maj@64" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)" + - function: "majority_vote" + - function: "take_first" + - name: "maj@8" + filter: + - function: "take_first_k" + k: 8 + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)" + - function: "majority_vote" + - function: "take_first" +``` + +We are able to provide multiple different filter pipelines, each with their own name and list of filters to apply in sequence. + +Our first filter pipeline implements +- applying a regex to the model generations (extracting the number within the phrase "The answer is (number)") +- selecting only the first out of the 64 model answers + +Then scoring this single answer. + +```yaml +- name: "score-first" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)" + - function: "take_first" +``` + +Our second filter pipeline, "maj@64", does majority voting across all 64 answers via: +- applying the same regex to all responses, to get the numerical answer from the model for each of the 64 responses per problem +- applying majority voting to all responses, which then returns a length-1 `[]` list for each +- taking the first element of this length-1 list, to then score the sole response `` for each document. + +```yaml +- name: "maj@64" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)" + - function: "majority_vote" + - function: "take_first" +``` + +Our final filter pipeline, "maj@8", does majority voting across the first 8 of the model's responses per document via: +- subsetting the len-64 list of responses `[answer1, answer2, ..., answer64]` to `[answer1, answer2, ..., answer8]` for each document +- performing the same sequence of filters on these new sets of 8 responses, for each document. +```yaml +- name: "maj@8" + filter: + - function: "take_first_k" + k: 8 + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)" + - function: "majority_vote" + - function: "take_first" +``` + +Thus, given the 64 responses from our LM on each document, we can report metrics on these responses in these 3 different ways, as defined by our filter pipelines. + + +### Adding a custom filter + +Just like adding a custom model with `register_model` decorator one is able to do the same with filters, for example + +```python +from lm_eval.api.filter import Filter +from lm_eval.api.registry import register_filter + +@register_filter("new_filter") +class NewFilter(Filter) + ... 
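+
+    # (Illustrative only.) The base `Filter` class expects an `apply` method;
+    # check `lm_eval/api/filter.py` for the exact signature before relying on
+    # this sketch. `resps` holds one list of model responses per document, and
+    # the filter must return a structure of the same shape.
+    def apply(self, resps, docs):
+        # e.g. strip surrounding whitespace from every model response
+        return [[r.strip() for r in doc_resps] for doc_resps in resps]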
+```
+
+
+
+## Embedded Python Code
+
+You can use Python functions for certain arguments by using the `!function` operator after the argument name, followed by `<filename>.<python function name>`. This feature can be used for the following arguments:
+1. `doc_to_text`
+2. `doc_to_target`
+3. `doc_to_choice`
+4. `aggregation` for a `metric` in `metric_list`
+
+## (No Longer Recommended) Direct `Task` Subclassing
+
+The prior method for implementing new tasks was to subclass `Task`. While we intend to migrate all tasks to the new YAML implementation option going forward, it remains possible to subclass the Task class and implement custom logic. For more information, see `docs/task_guide.md` in v0.3.0 of the `lm-evaluation-harness`.
+
+
+## Including a Base YAML
+
+You can base a YAML on another YAML file as a template. This can be handy when you only need to change the prompt for `doc_to_text` but keep the rest the same, or change `filters` to compare which works better. Simply use `include` in the YAML file and write the name of the template you want to base it on. This assumes that the base template is in the same directory. Otherwise, you will need to provide the full path.
+```
+include: <template YAML filename>
+...
+```
+You can find an example of how to use this feature at [gsm8k-cot-self-consistency.yaml](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml), where it is based on [gsm8k-cot.yaml](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/tasks/gsm8k/gsm8k-cot.yaml).
+
+
+## Passing Arguments to Metrics
+
+Metrics can be defined in the `metric_list` argument when building the YAML config. Multiple metrics can be listed along with any auxiliary arguments. For example, when setting the [`exact_match` metric](https://github.com/huggingface/evaluate/tree/main/metrics/exact_match), auxiliary arguments such as `ignore_case`, `ignore_punctuation`, and `regexes_to_ignore` can be listed as well. They will be added to the metric function as `kwargs`. Some metrics have predefined values for `aggregation` and `higher_is_better`, so listing only the metric name can be sufficient.
+
+```
+metric_list:
+  - metric: acc
+  - metric: exact_match
+    aggregation: mean
+    higher_is_better: true
+    ignore_case: true
+    ignore_punctuation: false
+    regexes_to_ignore:
+      - ","
+      - "\\$"
+```
+
+### Natively Supported Metrics
+
+Here we list all metrics currently supported natively in `lm-eval`:
+
+Metrics:
+* `acc` (accuracy)
+* `acc_norm` (length-normalized accuracy)
+* `acc_mutual_info` (baseline loglikelihood - normalized accuracy)
+* `perplexity`
+* `word_perplexity` (perplexity per word)
+* `byte_perplexity` (perplexity per byte)
+* `bits_per_byte`
+* `matthews_corrcoef` (Matthews correlation coefficient)
+* `f1` (F1 score)
+* `bleu`
+* `chrf`
+* `ter`
+
+Aggregation functions:
+* `mean`
+* `median`
+* `perplexity`
+* `weighted_perplexity`
+* `bits_per_byte`
+
+### Adding a Multiple Choice Metric
+
+Adding a multiple choice metric has a few steps. To get it working, you need to:
+
+1. register a metric function
+2. register an aggregation function
+3. update the `Task` definition to make sure the correct arguments are passed
+
+The default metric and aggregation functions are in `lm_eval/api/metrics.py`, and you can add a function there if it's for general use.
## (No Longer Recommended) Direct `Task` Subclassing

The prior method for implementing new tasks was to subclass `Task`. While we intend to migrate all tasks to the new YAML implementation option going forward, it remains possible to subclass the `Task` class and implement custom logic. For more information, see `docs/task_guide.md` in v0.3.0 of the `lm-evaluation-harness`.


## Including a Base YAML

You can base a YAML on another YAML file as a template. This can be handy when you just need to change the prompt for `doc_to_text` but keep the rest the same, or change `filters` to compare which works better. Simply use `include` in the YAML file and write the name of the template you want to base it on. This assumes that the base template is in the same directory; otherwise, you will need to provide the full path.
```
include: <YAML filename or full path>
...
```
You can find an example of how to use this feature at [gsm8k-cot-self-consistency.yaml](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml), which is based on [gsm8k-cot.yaml](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/tasks/gsm8k/gsm8k-cot.yaml).


## Passing Arguments to Metrics

Metrics can be defined in the `metric_list` argument when building the YAML config. Multiple metrics can be listed along with any auxiliary arguments. For example, when using the [`exact_match` metric](https://github.com/huggingface/evaluate/tree/main/metrics/exact_match), auxiliary arguments such as `ignore_case`, `ignore_punctuation`, and `regexes_to_ignore` can be listed as well. They will be passed to the metric function as `kwargs`. Some metrics have predefined values for `aggregation` and `higher_is_better`, so listing only the metric name can be sufficient.

```
metric_list:
  - metric: acc
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: false
    regexes_to_ignore:
      - ","
      - "\\$"
```

### Natively Supported Metrics

Here we list all metrics currently supported natively in `lm-eval`:

Metrics:
* `acc` (accuracy)
* `acc_norm` (length-normalized accuracy)
* `acc_mutual_info` (baseline loglikelihood - normalized accuracy)
* `perplexity`
* `word_perplexity` (perplexity per word)
* `byte_perplexity` (perplexity per byte)
* `bits_per_byte`
* `matthews_corrcoef` (Matthews correlation coefficient)
* `f1` (F1 score)
* `bleu`
* `chrf`
* `ter`

Aggregation functions:
* `mean`
* `median`
* `perplexity`
* `weighted_perplexity`
* `bits_per_byte`

### Adding a Multiple Choice Metric

Adding a multiple choice metric has a few steps. To get it working you need to:

1. register a metric function
2. register an aggregation function
3. update the `Task` definition to make sure the correct arguments are passed

The default metric and aggregation functions are in `lm_eval/api/metrics.py`, and you can add a function there if it's for general use. The metrics are towards the bottom of the file and look like this:

    @register_metric(
        metric="mcc",
        higher_is_better=True,
        output_type="multiple_choice",
        aggregation="matthews_corrcoef",
    )
    def mcc_fn(items):  # This is a passthrough function
        return items

Note that many of these are passthrough functions, and for multiple choice (at least) this function is never actually called.

Aggregation functions are defined towards the top of the file; here's an example:

    @register_aggregation("matthews_corrcoef")
    def matthews_corrcoef(items):
        unzipped_list = list(zip(*items))
        golds = unzipped_list[0]
        preds = unzipped_list[1]
        return sklearn.metrics.matthews_corrcoef(golds, preds)

This function returns a single numeric value. The input is defined in `Task.process_results` in `lm_eval/api/task.py`. There's a section that looks like this:

    result_dict = {
        **({"acc": acc} if "acc" in use_metric else {}),
        **({"f1": (gold, pred)} if "f1" in use_metric else {}),
        **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
        **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
        **({"exact_match": exact_match} if "exact_match" in use_metric else {}),
    }

The value here determines the input to the aggregation function, though the name used matches the metric function. These metrics all have simple needs and just require the accuracy or the gold and predicted values, but immediately below this there are examples of metrics with more complicated needs that you can use as a reference.

## Good Reference Tasks

Contributing a new task can be daunting! Luckily, much of the work has often been done for you in a different, similarly evaluated task. Good examples of task implementations to study include:

Multiple choice tasks:
- SciQ (`lm_eval/tasks/sciq/sciq.yaml`)

Corpus perplexity evaluations:
- Wikitext (`lm_eval/tasks/wikitext/wikitext.yaml`)

Generative tasks:
- GSM8k (`lm_eval/tasks/gsm8k/gsm8k.yaml`)

Tasks using complex filtering:
- GSM8k with CoT (+ with Self-Consistency): (`lm_eval/tasks/gsm8k/gsm8k-cot.yaml` ; `lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml`)

# Group Configuration

When evaluating a language model, it is not unusual to test across a number of tasks that may not be related to one another, in order to assess a variety of capabilities. To this end, it can be cumbersome to have to list the full set of tasks, or to add a new group name to the YAML of each individual task.

To solve this, we can create a **group** YAML config. This is a config that contains the names of the tasks that should be included in a particular group. The config consists of two main keys: a `group` key, which denotes the name of the group (as it would be called from the command line, e.g. `mmlu`), and a `task` key, which is where we can list the tasks. The tasks listed in `task` are the task names that have been registered. A good example of a group YAML config can be found at [../lm_eval/tasks/mmlu/default/_mmlu.yaml]. See also the [New Task Guide](./new_task_guide.md) for a more in-depth, tutorial-style explanation of how to write complex GroupConfigs.
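For illustration, a minimal group config might look like the following sketch. The group name is a placeholder and the tasks are simply registered task names mentioned elsewhere in this guide; see the linked `_mmlu.yaml` for a real-world example:

```yaml
# my_suite.yaml (illustrative group config)
group: my_suite
task:
  - sciq
  - gsm8k
  - wikitext
```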
## Configurations

Groups are configured via the `GroupConfig` object. Below, we describe all fields usable within the object, and their role in defining a group.

### Parameters

- **group** (`str`, defaults to `None`) - Name of the group. Used to invoke it from the command line.
- **group_alias** (`str`, defaults to `None`) - Alternative name for the group that will be printed in the table output.
- **task** (`Union[str, list]`, defaults to `None`) - List of tasks that constitute the group.
- **aggregate_metric_list** (`list`, defaults to `None`) - Similar to `metric_list` in TaskConfigs, provide a list of configurations for metrics that should be aggregated across subtasks. Leaving this empty will result in no aggregation being performed for this group. Keys for each list entry are:
  - `metric: str` - the name of the metric to aggregate over (all subtasks must report a metric with this name).
  - `aggregation: str` - the aggregation function to apply to these per-subtask metrics. **Currently, only `mean` is supported.**
  - `weight_by_size: bool = True` - whether to perform micro-averaging (`True`) or macro-averaging (`False`) of subtasks' scores when reporting the group's metric. MMLU, for example, averages over per-document accuracies (the *micro average*), resulting in the same accuracy as if one simply concatenated all 57 subjects into a single dataset and evaluated accuracy on that dataset.
  - `filter_list: Union[str, List[str]] = "none"` - which filter keys to match on when aggregating results. For example, to aggregate the `exact_match` metric under the `strict-match` filter for `bbh_cot_zeroshot`, set this to `filter_list: "strict-match"`.
- **metadata** (`dict`, *optional*) - As with TaskConfigs, a field where extra config metadata can be passed. Set the `num_fewshot` key within this to override the printed n_shot value in a results table for your group, for example.
diff --git a/docs/zh_cn/.readthedocs.yaml b/docs/zh_cn/.readthedocs.yaml deleted file mode 100644 index 034fffa7c..000000000 --- a/docs/zh_cn/.readthedocs.yaml +++ /dev/null @@ -1,17 +0,0 @@ -version: 2 - -# Set the version of Python and other tools you might need -build: - os: ubuntu-20.04 - tools: - python: "3.10" - -formats: - - epub - -sphinx: - configuration: docs/zh_cn/source/conf.py - -python: - install: - - requirements: requirements/docs.txt \ No newline at end of file diff --git a/docs/zh_cn/Makefile b/docs/zh_cn/Makefile deleted file mode 100644 index d0c3cbf10..000000000 --- a/docs/zh_cn/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from the environment for the first two. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = source -BUILDDIR = build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/zh_cn/make.bat b/docs/zh_cn/make.bat deleted file mode 100644 index 747ffb7b3..000000000 --- a/docs/zh_cn/make.bat +++ /dev/null @@ -1,35 +0,0 @@ -@ECHO OFF - -pushd %~dp0 - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set SOURCEDIR=source -set BUILDDIR=build - -%SPHINXBUILD% >NUL 2>NUL -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found.
Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.https://www.sphinx-doc.org/ - exit /b 1 -) - -if "%1" == "" goto help - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% - -:end -popd diff --git a/docs/zh_cn/source/advanced/VLM_quant&img-txt_dataset.md b/docs/zh_cn/source/advanced/VLM_quant&img-txt_dataset.md deleted file mode 100644 index ec67a12fd..000000000 --- a/docs/zh_cn/source/advanced/VLM_quant&img-txt_dataset.md +++ /dev/null @@ -1,81 +0,0 @@ -# VLM 量化和 custom_mm 数据集 - -llmc目前支持对VLM模型使用图像-文本数据集进行校准并量化 - -## VLM 量化 -当前支持的模型如下: -1. llava - -2. intervl2 - -3. llama3.2 - -4. qwen2vl - -更多的vlm正在实现中 - -下面是一个配置的例子,可以参考GitHub上的[校准数据集模板](https://github.com/user-attachments/files/18433608/general_custom_data_examples.zip)。 - -```yaml -model: - type: Llava - path: model path - tokenizer_mode: slow - torch_dtype: auto -calib: - name: custom_mm - download: False - path: calib data path - apply_chat_template: True - add_answer: True # Defalut is False. If set it to Ture, calib data will add answers. - n_samples: 8 - bs: -1 - seq_len: 512 - padding: True -``` - -## custom_mm 数据集 -custom_mm 数据集格式如下: -``` -custom_mm-datasets/ -├── images/ -│ ├── image1.jpg -│ ├── image2.jpg -│ ├── image3.jpg -│ └── ... (other images) -└── img_qa.json -``` - -img_qa.json 格式示例: -```json -[ - { - "image": "images/0a3035bfca2ab920.jpg", - "question": "Is this an image of Ortigia? Please answer yes or no.", - "answer": "Yes" - }, - { - "image": "images/0a3035bfca2ab920.jpg", - "question": "Is this an image of Montmayeur castle? Please answer yes or no.", - "answer": "No" - }, - { - "image": "images/0ab2ed007db301d5.jpg", - "question": "Is this a picture of Highgate Cemetery? Please answer yes or no.", - "answer": "Yes" - } -] -``` -"answer" 可以不需要 - -custom_mm数据集中可以存在仅有文本的校准数据(当前llama3.2除外) - -## VLM 测评 - -llmc接入了[lmms-eval](https://github.com/EvolvingLMMs-Lab/lmms-eval)进行各种下游数据集测评,在config的eval中需要指定type为vqa,name中的下游测评数据集参考lmms-eval的标准。 - -``` -eval: - type: vqa - name: [mme] # vqav2, gqa, vizwiz_vqa, scienceqa, textvqa -``` diff --git a/docs/zh_cn/source/advanced/Vit_quant&img_dataset.md b/docs/zh_cn/source/advanced/Vit_quant&img_dataset.md deleted file mode 100644 index fcf211292..000000000 --- a/docs/zh_cn/source/advanced/Vit_quant&img_dataset.md +++ /dev/null @@ -1,46 +0,0 @@ -# Vit 量化和 img 数据集 - -llmc目前支持对Vit模型使用图像数据集进行校准并量化 - -## Vit 量化 - -下面是一个配置的例子 - -```yaml -model: - type: Vit - path: /models/vit-base-patch16-224 - torch_dtype: auto -calib: - name: imagenet - type: img - download: False - path: img calib datasets path - n_samples: 32 - bs: 1 - seq_len: 512 # Useless arguments for vit - preproc: img_general - seed: *seed -eval: - eval_pos: [pretrain, fake_quant] - name: imagenet - type: acc # acc: accracy - download: False - path: img datasets path - seq_len: 2048 # Useless arguments for vit - bs: 1 - inference_per_block: False - eval_token_consist: False -``` - -## img 数据集 -img数据集格式要求:img数据集目录下存在图像 - -img数据集格式示例: -``` -images/ -├── image1.jpg -├── image2.jpg -├── image3.jpg -└── ... 
(other images) -``` diff --git a/docs/zh_cn/source/advanced/custom_dataset.md b/docs/zh_cn/source/advanced/custom_dataset.md deleted file mode 100644 index 8efb48e9a..000000000 --- a/docs/zh_cn/source/advanced/custom_dataset.md +++ /dev/null @@ -1,30 +0,0 @@ -# 自定义校准数据集 - -llmc目前支持以下几种校准数据集 - -1. pileval - -2. wikitext2 - -3. c4 - -4. ptb - -5. custom - -其中custom表示使用用户自定义的校准数据集。某些特定场景下的专有模型,量化的时候的校准数据使用该场景下的数据更为合适。下面是一个配置的例子。 - -``` -calib: - name: custom - download: False - load_from_txt: True - path: 自定义数据集,以txt为后缀结尾 - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: random_truncate_txt - seed: *seed -``` - -用户可以将一条一条数据文本,写到txt文件里面,每一行代表一条文本数据,使用上述的配置,可以实现自定义数据集的校准。 diff --git a/docs/zh_cn/source/advanced/mix_bits.md b/docs/zh_cn/source/advanced/mix_bits.md deleted file mode 100644 index a8deddd1a..000000000 --- a/docs/zh_cn/source/advanced/mix_bits.md +++ /dev/null @@ -1,125 +0,0 @@ -# 层间混合比特量化 - -llmc目前支持了层间混合比特量化,可以实现任意程度的混合。 - -以下是一些设置样例: - -1. 模型整体实现4bit weight-only量化,对所有的down_proj实现8bit weight-only量化 - -``` -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - mix_bits: - setting_0: - layer_name: [down_proj] - do_quant: True - weight: - bit: 8 - symmetric: False - granularity: per_group - group_size: 128 -``` - -2. 模型整体实现4bit weight-only量化,对第0,1,2,3,28,29,30,31个block内的down_proj实现8bit weight-only量化,对所有的o_proj不量化 - -``` -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - mix_bits: - setting_0: - layer_name: [down_proj#0-1-2-3-28-29-30-31] - do_quant: True - weight: - bit: 8 - symmetric: False - granularity: per_group - group_size: 128 - setting_1: - layer_name: [o_proj] - do_quant: False -``` - -3. 模型整体实现W4A4量化,对所有的down_proj实现W8A8量化 - -``` -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_channel - act: - bit: 4 - symmetric: False - granularity: per_token - mix_bits: - setting_0: - layer_name: [down_proj] - do_quant: True - weight: - bit: 8 - symmetric: False - granularity: per_channel - act: - bit: 8 - symmetric: False - granularity: per_token -``` - -4. 一个足够混乱的设置,可能没有现实意义 - -``` -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_channel - act: - bit: 4 - symmetric: False - granularity: per_token - mix_bits: - setting_0: - layer_name: [down_proj#0-1-8-15] - do_quant: True - weight: - bit: 8 - symmetric: False - granularity: per_channel - act: - bit: 8 - symmetric: False - granularity: per_token - setting_1: - layer_name: [down_proj#2-6-4-11, o_proj#2-7] - do_quant: False - setting_2: - layer_name: [down_proj#27] - do_quant: True - weight: - bit: 6 - symmetric: False - granularity: per_channel - act: - bit: 6 - symmetric: False - granularity: per_token - setting_3: - layer_name: [down_proj#13-21] - do_quant: True - weight: - bit: 4 - symmetric: False - granularity: per_channel -``` diff --git a/docs/zh_cn/source/advanced/model_test_v1.md b/docs/zh_cn/source/advanced/model_test_v1.md deleted file mode 100644 index 0ba38d06a..000000000 --- a/docs/zh_cn/source/advanced/model_test_v1.md +++ /dev/null @@ -1,205 +0,0 @@ -# 模型精度测试V1 - -## 精度测试流程 - -llmc支持基础的ppl(perplexity,困惑度)评测,但是更多的下游任务评测,llmc本身并不支持。 - -常见的做法使用评测工具直接对模型进行推理测试,目前已有的评测工具包括但不限于 - -1. [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness) - -2. [opencompass](https://github.com/open-compass/opencompass) - -但是这种评测方法评测效率不高,我们推荐使用**推理引擎评测工具分离**的方式进行模型精度评测,模型由推理引擎进行推理,并以api的形式serving起来,评测工具对该api进行评测。这种方式有以下的好处: - -1. 使用高效的推理引擎进行模型推理,可以加速整个评测进程 - -2. 
将模型的推理和模型的评测分离开,各自负责份内专业的事,代码结构更清晰 - -3. 使用推理引擎推理模型,更符合实际部署的场景,和模型实际部署的精度更容易对齐 - -我们在此推荐并介绍使用以下的模型的压缩-部署-评测流程:**llmc压缩-lightllm推理-opencompass评测** - -以下是相关工具的链接: - -1. llmc,大模型压缩工具,[[github](https://github.com/ModelTC/llmc),[文档](https://llmc-zhcn.readthedocs.io/en/latest/)] - -2. lightllm,大模型推理引擎,[[github](https://github.com/ModelTC/lightllm)] - -3. opencompass,大模型评测工具,[[github](https://github.com/open-compass/opencompass),[文档](https://opencompass.readthedocs.io/zh-cn/latest/)] - - -## lightllm推理引擎的使用 - -[lightllm](https://github.com/ModelTC/llmc)官方仓库有着更详细的文档,这里仅给出一个简单快速入门的使用文档 - - 起一个float模型的服务 - -**安装lightllm** - -``` -git clone https://github.com/ModelTC/lightllm.git -cd lightllm -pip install -v -e . -``` - -**起服务** - -``` -python -m lightllm.server.api_server --model_dir 模型路径 \ - --host 0.0.0.0 \ - --port 1030 \ - --nccl_port 2066 \ - --max_req_input_len 6144 \ - --max_req_total_len 8192 \ - --tp 2 \ - --trust_remote_code \ - --max_total_token_num 120000 -``` - -上述命令将在本机的1030端口,起一个2卡的服务 - -上述命令可以通过tp的数量设置,在tp张卡上进行TensorParallel推理,适用于较大的模型的推理。 - -上述命令中的max_total_token_num,会影响测试过程中的吞吐性能,可以根据[lightllm文档](https://github.com/ModelTC/lightllm/blob/main/docs/ApiServerArgs.md),进行设置。只要不爆显存,往往设置越大越好。 - -如果要在同一个机器上起多个lightllm服务,需要重新设定上面的port和nccl_port,不要有冲突即可。 - - - 对服务进行简单测试 - -执行下面的python脚本 - -``` -import requests -import json - -url = 'http://localhost:1030/generate' -headers = {'Content-Type': 'application/json'} -data = { - 'inputs': 'What is AI?', - "parameters": { - 'do_sample': False, - 'ignore_eos': False, - 'max_new_tokens': 128, - } -} -response = requests.post(url, headers=headers, data=json.dumps(data)) -if response.status_code == 200: - print(response.json()) -else: - print('Error:', response.status_code, response.text) -``` - -若上述脚本是有正常返回,说明服务正常 - - 起一个量化模型的服务 - -``` -python -m lightllm.server.api_server --model_dir 模型路径 \ - --host 0.0.0.0 \ - --port 1030 \ - --nccl_port 2066 \ - --max_req_input_len 6144 \ - --max_req_total_len 8192 \ - --tp 2 \ - --trust_remote_code \ - --max_total_token_num 120000 \ - --mode triton_w4a16 -``` - -上述命令加了一个`--mode triton_w4a16`,表示使用了w4a16的naive量化 - -起完服务,同样需要验证一下服务是否正常 - -上述的命令使用的模型路径是原始预训练的模型,并没有经过llmc调整。可以按照llmc的文档,打开save_trans,保存一个调整之后的模型,然后再运行上述的naive量化服务命令 - -## opencompass评测工具的使用 - -[opencompass](https://github.com/open-compass/opencompass)官方仓库有着更详细的文档,这里仅给出一个简单快速入门的使用文档 - -**安装opencompass** - -``` -git clone https://github.com/open-compass/opencompass.git -cd opencompass -pip install -v -e . 
-``` - -**修改配置文件** - -配置文件在[这里](https://github.com/open-compass/opencompass/blob/main/configs/eval_lightllm.py),这个配置文件是用于opencompass来评测lightllm的api服务的精度,需要注意的是里面的`url`里面的port,要和上述的lightllm的服务port保持一致 - -评测的数据集选择,需要修改这部分代码 - -``` -with read_base(): - from .summarizers.leaderboard import summarizer - from .datasets.humaneval.deprecated_humaneval_gen_a82cae import humaneval_datasets -``` - -上述的代码片段,表示测试humaneval数据集,更多的数据集测试支持,可以查看[这里](https://github.com/open-compass/opencompass/tree/main/configs/datasets) - -**数据集下载** - -需要根据opencompass的[文档](https://opencompass.readthedocs.io/zh-cn/latest/get_started/installation.html#id2),做好数据集的准备 - -**运行精度测试** - -修改好上述的配置文件后,即可运行下面的命令 -``` -python run.py configs/eval_lightllm.py -``` -当模型完成推理和指标计算后,我们便可获得模型的评测结果。其中会在当前目录下生成output文件夹,logs子文件夹记录着评测中的日志,最后生成summary子文件会记录所测数据集的精度 - -## lm-evaluation-harness评测工具的使用 - -我们保留了[lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness)中的命令。只添加了两个参数``--config``和``--quarot``。前者用于加载由``save_trans``保存的转换模型或根据模型路径的原始huggingface模型。如果不使用``quant``部分,则配置中将移除该部分以对全精度模型进行评估,我们只支持RTN量化,其中所有相关的量化粒度需要与转换模型的设置对齐。如果模型经过[QuaRot](https://arxiv.org/abs/2404.00456)转换,则使用后者。 - -``` -export CUDA_VISIBLE_DEVICES=4,5,6,7 -llmc=./llmc -lm_eval=./llmc/lm-evaluation-harness -export PYTHONPATH=$llmc:$PYTHONPATH -export PYTHONPATH=$llmc:$lm_eval:$PYTHONPATH -# Replace the config file (i.e., RTN with algorithm-transformed model path or notate quant with original model path) -# with the one you want to use. `--quarot` is depend on the transformation algorithm used before. -accelerate launch --multi_gpu --num_processes 4 llmc/tools/llm_eval.py \ - --config llmc/configs/quantization/RTN/rtn_quarot.yml \ - --model hf \ - --quarot \ - --tasks lambada_openai,arc_easy \ - --model_args parallelize=False \ - --batch_size 64 \ - --output_path ./save/lm_eval \ - --log_samples -``` - -*备注:请在``--model_args``不使用pretrained=\*同时进行评估时取消并行化(或paralleize=False)。* - -## 常见问题 - -** 问题1 ** - -opencompass中的数据集配置文件,同一个数据集有不同的后缀,表示的是什么意思 - -** 解决方法 ** - -不同后缀表示不同的prompt模板,详细的opencompass问题,可以查看opencompass文档 - -** 问题2 ** - -llama模型的humaneval的测试精度过低 - -** 解决方法 ** - -可能需要将opencompass提供的数据集中的humaneval的jsonl文件里面每一条末尾的\n给删除,再重新测试一下 - -** 问题3 ** - -测试速度还是不够快 - -** 解决方法 ** - -可以考虑lightllm起服务时的max_total_token_num参数设置是否合理,过小的设置,会导致测试并发偏低 - diff --git a/docs/zh_cn/source/advanced/model_test_v2.md b/docs/zh_cn/source/advanced/model_test_v2.md deleted file mode 100644 index e57f51c17..000000000 --- a/docs/zh_cn/source/advanced/model_test_v2.md +++ /dev/null @@ -1,108 +0,0 @@ -# 模型精度测试V2 - -模型精度测试V1中提到的精度测试方式,流程上不够简洁,我们倾听了社区开发者的声音,开发了模型精度测试V2 - -在V2版本中,我们不再需要使用推理引擎起服务,也不需要再去拆成多段流程进行测试。 - -我们的目标是,将下游精度测试等价于PPL测试,运行一个llmc的程序,会在执行完算法之后,直接进行ppl测试,同时也会直接进行对应的下游精度测试。 - -完成上述的目标,我们只需要在已有的config中,添加一个opencompass的设置 - - -``` -base: - seed: &seed 42 -model: - type: Llama - path: model path - torch_dtype: auto -calib: - name: pileval - download: False - path: calib data path - n_samples: 128 - bs: -1 - seq_len: 512 - preproc: pileval_awq - seed: *seed -eval: - eval_pos: [pretrain, fake_quant] - name: wikitext2 - download: False - path: eval data path - bs: 1 - seq_len: 2048 -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - special: - weight_clip: False -save: - save_trans: True - save_path: ./save -opencompass: - cfg_path: opencompass config path - max_num_workers: max num works - output_path: ./oc_output -``` - - opencompass下的cfg_path,需要指向一个opencompass的config路径 - 
-我们在[这里](https://github.com/ModelTC/llmc/tree/main/configs/opencompass)分别给出了base模型和chat模型的关于human-eval测试的config,作为给大家的参考。 - -需要注意的是[opencompass自带的config](https://github.com/ModelTC/opencompass/blob/opencompass-llmc/configs/models/hf_llama/hf_llama3_8b.py)中,需要有path这个key,而这里我们不需要这个key,因为llmc会默认模型的路径在trans的save路径。 - -当然,因为需要trans的save路径,所以想测试opencompass,就需要设置save_trans为True - - opencompass下的max_num_workers,表示最大的推理实例数 - -假设模型是在单卡上跑的,那么max_num_workers就是表示,要起max_num_workers个推理实例,即占用了max_num_workers张卡。 - -假设模型是在多卡上跑的,即参考下面的多卡并行测试,举例如果模型是在2张卡上进行推理,那么max_num_workers就是表示,要起max_num_workers个推理实例,即占用了2*max_num_workers张卡。 - -综上,所需占用的卡数 = PP数 * max_num_workers - -如果所需占用的卡数超过实际中的卡数,那么就会有worker排队情况。 - -max_num_workers不仅会起多个推理实例,还会把每个数据集进行切分成max_num_workers份,可以理解成是数据并行。 - -所以:最佳的设置方案就是,让所需占用的卡数=实际可用的卡数。 - -比如: - -在一个8卡机器上,某个模型,用单卡跑,则max_num_workers=8 - -在一个8卡机器上,某个模型,用四卡跑,则max_num_workers=2 - -我们尽量让PP数降低,让max_num_workers提高。因为PP并行会变慢,PP仅用在模型实在跑不了的情况,比如70B模型,单卡跑不了,我们就可以设置PP=4,用4个80G显存的卡去跑。 - - opencompass下的output_path,是设置opencompass的评测日志的输出目录 - -在该日志目录中,opencompass会输出推理和评测的日志,推理的具体结果,评测最终的精度等。 - -在运行llmc程序之前,还需要安装做了[llmc适配的opencompass](https://github.com/ModelTC/opencompass/tree/opencompass-llmc) - -``` -git clone https://github.com/ModelTC/opencompass.git -b opencompass-llmc -cd opencompass -pip install -v -e . -pip install human-eval -``` - -根据opencompass的[文档](https://opencompass.readthedocs.io/zh-cn/latest/get_started/installation.html#id2),做好数据集的准备,将数据集,放在你执行命令的当前目录 - -最后你就可以像运行一个正常的llmc程序一样,载入上述的config,进行模型压缩和精度测试 - -## 多卡并行测试 - -如果模型太大,单卡评测放不下,需要使用多卡评测精度,我们支持在运行opencompass时使用pipeline parallel,即PP并行。 - -你需要做的仅仅就是: - -1. 确定哪些卡是可用的,在你的运行脚本最前面,添加到CUDA_VISIBLE_DEVICES中 - -2. 修改opencompass下的cfg_path指向的文件,将里面的num_gpus设置成你需要的数量 diff --git a/docs/zh_cn/source/advanced/sparsification.md b/docs/zh_cn/source/advanced/sparsification.md deleted file mode 100644 index 3e82c266a..000000000 --- a/docs/zh_cn/source/advanced/sparsification.md +++ /dev/null @@ -1,180 +0,0 @@ -# 模型稀疏化 - -llmc目前正在逐渐支持稀疏化方法,目前已经实现了Magnitude,Wanda和ShortGPT将在未来支持更多的算法。 - -以下是Wanda的设置样例: - - -``` -base: - seed: &seed 42 -model: - type: Qwen2 # 设置模型名,可支持Llama,Qwen2,Llava,Gemma2等模型 - path: # 设置模型权重路径 - torch_dtype: auto -calib: - name: pileval - download: False - path: # 设置校准数据集路径 - n_samples: 512 - bs: 1 - seq_len: 512 - preproc: pileval_smooth - seed: *seed -eval: - eval_pos: [pretrain, transformed] # 非结构化稀疏在稀疏过程中直接将对应位置权重置0,transformed之后直接就可以得到稀疏模型,无需再进行额外的部署阶段 - name: wikitext2 - download: False - path: # 设置测试数据集路径 - bs: 1 - seq_len: 2048 -sparse: - method: Wanda - weight: - sparsity: 0.5 # 设置模型的稀疏率 - sparsity_out: False # 设置是否使用每一层稀疏后的输出作为下一层的输入 -save: - save_trans: True # 设置为True,可以保存下调整之后的权重 - save_path: ./save -``` - -以下展示了使用Wanda稀疏的一些结果: - - - - - www.lingdaima.com(零代码excel转HTML) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
ModelPPL
dense0.250.50.75
c4wikitext2c4wikitext2c4wikitext2c4wikitext2
LLaMa2-7B7.265.477.465.619.256.85260.42259.91
LLaMa2-70B5.713.325.763.46.494.1732.521.66
LLaMa3-8B9.446.1310.016.4715.079.68336.62290.38
LLaMa3-70B7.162.857.443.229.965.8193.9974.78
- - - -在下面展示了与Wanda[原仓库](https://github.com/locuslab/wanda)对比的结果,在这一实验设置下,所使用的超参数、校准数据集以及数据预处理、评测方法均与Wanda仓库对齐。 - - - - - - www.lingdaima.com(零代码excel转HTML) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
ModelWandaLLMC
LLaMa2-7b6.916.91
LLaMa2-70b4.224.19
LLaMa3-8b9.569.58
LLaMa3-70bOOM5.75
- - diff --git a/docs/zh_cn/source/advanced/token_reduction.md b/docs/zh_cn/source/advanced/token_reduction.md deleted file mode 100644 index be12c6ad6..000000000 --- a/docs/zh_cn/source/advanced/token_reduction.md +++ /dev/null @@ -1,68 +0,0 @@ -# Token Reduction - -目前LightCompress支持对主流的多模态大语言模型进行token reduction,配置十分简单,即插即用。 - -下面是一个配置的例子 - -```yaml -base: - seed: &seed 42 -model: - type: Llava - path: model path - torch_dtype: auto -eval: - eval_pos: [pretrain, transformed] - type: vqa - name: [gqa, mmbench_en_dev, mme] - bs: 1 - inference_per_block: False -sparse: - method: TokenReduction - special: - method: FastV - pruning_loc: 3 - rate: 0.778 -save: - save_trans: False - save_fake: False - save_path: /path/to/save/ -``` - -配置文件中包含三大核心内容,包括: - -1. `model` -在模型选择上,可以选择LLaVA,LLaVA-NeXT,Qwen2.5VL以及LLaVA OneVision等,这些模型涵盖了图像任务和视频任务,详细的模型支持列表可以查阅[文件](https://github.com/ModelTC/LightCompress/blob/main/llmc/models/__init__.py),未来LightCompress也会支持更多的模型。 - -2. `eval` -首先,在`eval_pos`参数的选择上,`pretrain`表示原始保留所有视觉token的模型,`transformed`表示应用相应算法进行token reduction的模型。LightCompress接入了[lmms-eval](https://github.com/EvolvingLMMs-Lab/lmms-eval)进行各种下游数据集测评,需要将`type`指定为`vqa`,`name`中的下游测评数据集参考lmms-eval[文档](https://github.com/EvolvingLMMs-Lab/lmms-eval/blob/main/docs/current_tasks.md)中的命名方式。 - -3. `sparse` -`method`需要首先指定为TokenReduction,在`special`中继续指定具体的算法以及相关的一些超参数。由于每个算法对应的超参数不同,详细的可以参考[配置文件](https://github.com/ModelTC/LightCompress/tree/main/configs/sparsification/methods)。 - - -## 结合量化 - -LightCompress也支持同时使用token reduction和量化的极致压缩方案,首先需要选择量化算法存储一个`fake_qunat`模型,可以参考量化板块的文档。其次加载这个模型并在`quant`下加入`token_reduction`字段即可。 - -```yaml -quant: - method: RTN - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - special: - actorder: True - static_groups: True - percdamp: 0.01 - blocksize: 128 - true_sequential: True - quant_out: True - token_reduction: - method: FastV - special: - pruning_loc: 3 - rate: 0.778 -``` \ No newline at end of file diff --git a/docs/zh_cn/source/backend/autoawq.md b/docs/zh_cn/source/backend/autoawq.md deleted file mode 100644 index c58a22bc2..000000000 --- a/docs/zh_cn/source/backend/autoawq.md +++ /dev/null @@ -1,115 +0,0 @@ -# AutoAWQ量化推理 - -[AutoAWQ](https://github.com/casper-hansen/AutoAWQ) 是一个易于使用的 4-bit 权重量化模型的包。与 FP16 相比,**AutoAWQ** 能将模型速度提高 3 倍,并将内存需求减少 3 倍。**AutoAWQ** 实现了激活感知权重量化 (AWQ) 算法,用于对大型语言模型进行量化。 - -**LLMC** 支持导出 **AutoAWQ** 所需的量化格式,并兼容多种算法,不仅限于 AWQ。相比之下,**AutoAWQ** 仅支持 AWQ 算法,而 **LLMC** 可以通过 GPTQ、AWQ 和 Quarot 等算法导出真实量化模型,供 **AutoAWQ** 直接加载,并使用 **AutoAWQ** 的 GEMM 和 GEMV 内核实现推理加速。 - - -## 1.1 环境准备 - -要使用 **AutoAWQ** 进行量化推理,首先需要安装并配置 **AutoAWQ** 环境: -```bash -INSTALL_KERNELS=1 pip install git+https://github.com/casper-hansen/AutoAWQ.git -# NOTE: This installs https://github.com/casper-hansen/AutoAWQ_kernels -``` - -## 1.2 量化格式 - -在 **AutoAWQ** 的定点整型量化中,支持以下几种常见格式: - -- **W4A16**:权重为 int4,激活为 float16; -- **权重 per-channel/group 量化**:按通道或按组进行量化; -- **权重非对称量化**:量化参数包括scale和zero point; - -因此,在使用 **LLMC** 进行模型量化时,必须确保权重和激活的比特数设置为 **AutoAWQ**支持的格式。 - - -## 1.3 使用LLMC量化模型 - - -### 1.3.1 校准数据 - -在本章节中,我们使用**Pileval**和**Wikitext**两个学术数据集作为校准数据,有关于校准数据的下载和预处理请参考[章节](https://llmc-zhcn.readthedocs.io/en/latest/configs.html)。 - -在实际使用中,建议应使用真实部署场景的数据进行离线量化校准。 - - -### 1.3.2 量化算法的选择 - - -**W4A16** - -在 W4A16 的量化设置下,我们建议使用 LLMC 中的 AWQ 算法。 - -具体实现可以参考 AWQ W4A16 的权重量化 [配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/autoawq/awq_w4a16.yml) - -```yaml -# configs/quantization/backend/autoawq/awq_w4a16.yml -quant: - method: Awq - weight: - 
bit: 4 - symmetric: True - granularity: per_group - group_size: 128 - pack_version: gemm_pack - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -``` - -请注意,此步骤中需要将 `pack_version` 参数设置为 `gemm_pack` 或 `gemv_pack`,它们分别对应将 int4 数据打包成 `torch.int32` 的两种方式,适用于 **AutoAWQ** 中加载 `GEMM` 和 `GEMV` 内核时的不同需求。关于 `GEMM` 和 `GEMV` 的区别,请参阅此[链接](https://github.com/casper-hansen/AutoAWQ/tree/main?tab=readme-ov-file#int4-gemm-vs-int4-gemv-vs-fp16)。 - - -此外,如果 AWQ 无法满足精度需求,还可以尝试其他算法,例如 [GPTQ](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/autoawq/gptq_w4a16.yml)。同时,我们建议使用[此章节](https://llmc-zhcn.readthedocs.io/en/latest/practice/awq_omni.html)中介绍的 **AWQ+OmniQuant 组合算法**,以进一步提升精度。我们也提供了相应的[配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/autoawq/w4a16_combin)供参考。 - - - - -### 1.3.3 真实量化模型导出 - -```yaml -save: - save_autoawq: True - save_path: /path/to/save_for_autoawq_awq_w4/ - -``` -请注意,务必将 `save_autoawq` 设置为 `True`。对于 **W4A16** 的量化设置,LLMC 会将权重打包为 `torch.int32` 形式导出,便于 **AutoAWQ** 直接加载,并且会同时导出量化参数。 - - -### 1.3.4 运行LLMC - -修改运行脚本中的配置文件路径并运行: - -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=awq_for_autoawq -config=${llmc}/configs/quantization/backend/autoawq/awq_w4a16.yml -``` -等LLMC运行结束后,真实量化的模型就会存储在`save.save_path`路径 - -## 1.4 使用AutoAWQ推理模型 - - -### 1.4.1 离线推理 - -我们提供了一个使用 **AutoAWQ** 进行离线推理的[示例](https://github.com/ModelTC/llmc/blob/main/examples/backend/autoawq/infer_with_autoawq.py)。 - -首先,需要将 **AutoAWQ** 的仓库克隆到本地: - -```bash -git clone https://github.com/casper-hansen/AutoAWQ.git -``` - -接着,将[示例](https://github.com/ModelTC/llmc/blob/main/examples/backend/autoawq/infer_with_autoawq.py)中的 `autoawq_path` 替换为你本地的 **AutoAWQ** 仓库路径,并将[示例](https://github.com/ModelTC/llmc/blob/main/examples/backend/autoawq/infer_with_autoawq.py)中的 `model_path`替换为`save.save_path` 中保存的模型路径。然后运行以下命令即可完成推理: - -```bash -cd examples/backend/autoawq - -CUDA_VISIBLE_DEVICES=0 python infer_with_autoawq.py -``` diff --git a/docs/zh_cn/source/backend/lightx2v.md b/docs/zh_cn/source/backend/lightx2v.md deleted file mode 100755 index 9aa3be442..000000000 --- a/docs/zh_cn/source/backend/lightx2v.md +++ /dev/null @@ -1,177 +0,0 @@ - -# lightx2v 量化推理 - -[lightx2v](https://github.com/ModelTC/lightx2v) 是一个专为满足视频生成模型推理需求设计的高效后端。它通过优化内存管理和计算效率,能够显著加速推理过程。 - -**LLMC** 支持导出 lightx2v 所需的量化模型格式,并通过其对多种量化算法的强大支持(如 AWQ、GPTQ、SmoothQuant 等),能够在保证推理速度的同时保持较高的量化精度。将 **LLMC** 与 **lightx2v** 结合使用,可以在不牺牲精度的前提下实现推理加速和内存优化,非常适合需要高效处理视频生成模型的应用场景。 - ---- - -## 1.1 环境准备 - -要使用 **lightx2v** 进行量化推理,首先需要安装并配置相关环境: - -```bash -# 克隆仓库及其子模块 -git clone https://github.com/ModelTC/lightx2v.git lightx2v && cd lightx2v -git submodule update --init --recursive - -# 创建并激活 conda 环境 -conda create -n lightx2v python=3.11 && conda activate lightx2v -pip install -r requirements.txt - -# 为避免版本冲突,单独安装 transformers -pip install transformers==4.45.2 - -# 安装 flash-attention 2 -cd lightx2v/3rd/flash-attention && pip install --no-cache-dir -v -e . - -# 安装 flash-attention 3(仅在 Hopper 架构下) -cd lightx2v/3rd/flash-attention/hopper && pip install --no-cache-dir -v -e . 
-``` - ---- - -## 1.2 量化格式 - -**lightx2v** 支持以下几种常见的定点量化格式: - -- **W8A8**:权重和激活均为 int8; -- **FP8 (E4M3)**:权重和激活均为 float8; -- **权重 per-channel 量化**; -- **激活 per-token 动态量化**,进一步提升精度; -- **对称量化**(仅使用 scale 参数)。 - -使用 **LLMC** 进行模型量化时,必须确保权重和激活的比特数符合 **lightx2v** 所支持的格式。 - ---- - -## 1.3 使用 LLMC 进行模型量化 - -### 1.3.1 校准数据 - -以 Wan2.1 模型在 I2V 任务为例,校准数据示例可在[此目录](https://github.com/ModelTC/llmc/tree/main/assets/wan_i2v/calib)中找到,用户可根据需求添加更多数据。 - -### 1.3.2 量化算法选择 - -#### **W8A8** - -推荐使用 **SmoothQuant** 算法,配置参考如下 [配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/video_gen/wan_i2v/smoothquant_w_a.yaml): - -```yaml -quant: - video_gen: - method: SmoothQuant - weight: - bit: 8 - symmetric: True - granularity: per_channel - act: - bit: 8 - symmetric: True - granularity: per_token - special: - alpha: 0.75 -``` - -如果 SmoothQuant 无法满足精度需求,可以尝试使用 **AWQ**,相关配置请参考 [AWQ 配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/video_gen/wan_i2v/awq_w_a.yaml)。 - -#### **FP8 动态量化** - -对于 FP8 格式,LLMC 支持权重 per-channel、激活 per-token 动态量化。推荐仍使用 **SmoothQuant**,参考配置如下: - -```yaml -quant: - video_gen: - method: SmoothQuant - weight: - quant_type: float-quant - bit: e4m3 - symmetric: True - granularity: per_channel - use_qtorch: True - act: - quant_type: float-quant - bit: e4m3 - symmetric: True - granularity: per_token - use_qtorch: True - special: - alpha: 0.75 -``` - -请确保将 `quant_type` 设置为 `float-quant`,并将 `use_qtorch` 设置为 `True`,因为 LLMC 的浮点量化依赖于 [QPyTorch](https://github.com/Tiiiger/QPyTorch)。 - -安装 QPyTorch: - -```bash -pip install qtorch -``` - -### 1.3.3 导出真实量化模型 - -```yaml -save: - save_lightx2v: True - save_path: /path/to/save_for_lightx2v/ -``` - -务必将 `save_lightx2v` 设置为 `True`。LLMC 会将权重以 `torch.int8` 或 `torch.float8_e4m3fn` 形式导出,供 lightx2v 直接使用,并附带相应的量化参数。 - -### 1.3.4 运行 LLMC - -编辑运行脚本中的配置路径: - -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=sq_for_lightx2v -config=${llmc}/configs/quantization/video_gen/wan_i2v/smoothquant_w_a.yaml -``` - -运行完成后,真实量化模型会保存在 `save.save_path` 中。 - -### 1.3.5 模型评估 - -以 Wan2.1 在 I2V 任务为例,测试数据在[此目录](https://github.com/ModelTC/llmc/tree/main/assets/wan_i2v/eval),配置参考如下: - -```yaml -eval: - eval_pos: [fake_quant] - type: video_gen - name: i2v - download: False - path: ../assets/wan_i2v/eval/ - bs: 1 - target_height: 480 - target_width: 832 - num_frames: 81 - guidance_scale: 5.0 - output_video_path: ./output_videos_sq/ -``` - -LLMC 会生成使用伪量化模型生成的视频结果。 - ---- - -## 1.4 使用 lightx2v 进行模型推理 - -### 1.4.1 权重结构转换 - -LLMC 导出后,需将模型结构转换为 lightx2v 支持的格式,可使用 [转换脚本](https://github.com/ModelTC/lightx2v/blob/main/examples/diffusers/converter.py): - -```bash -python converter.py -s /path/to/save_for_lightx2v/ -o /path/to/output/ -d backward -``` - -转换后的模型将保存在 `/path/to/output/`。 - -### 1.4.2 离线推理 - -编辑 [推理脚本](https://github.com/ModelTC/lightx2v/blob/main/scripts/run_wan_i2v_advanced_ptq.sh),设置 `model_path` 为 `/path/to/output/`,`lightx2v_path` 为本地路径,然后运行: - -```bash -bash run_wan_i2v_advanced_ptq.sh -``` diff --git a/docs/zh_cn/source/backend/mlcllm.md b/docs/zh_cn/source/backend/mlcllm.md deleted file mode 100644 index 668fd7dbc..000000000 --- a/docs/zh_cn/source/backend/mlcllm.md +++ /dev/null @@ -1,147 +0,0 @@ -# MLC LLM量化推理 - -[MLC LLM](https://github.com/mlc-ai/mlc-llm) 是一个专为大语言模型设计的机器学习编译器和高性能部署引擎。其使命是让每个人都能够在自己的平台上本地开发、优化和部署 AI 模型。 - -**MLC LLM** 支持直接加载由 **AutoAWQ** 导出的真实量化模型。由于 **LLMC** 与 **AutoAWQ** 已无缝集成,**AutoAWQ** 作为 **LLMC** 与 **MLC LLM** 之间的桥梁,极大简化了量化模型的加载与部署流程。 - - - - -## 1.1 环境准备 - -要使用 **MLC LLM** 
进行量化推理,首先需要安装并配置 **MLC LLM** 环境,以CUDA 12.2为例: -```bash -python -m pip install --pre -U -f https://mlc.ai/wheels mlc-llm-nightly-cu122 mlc-ai-nightly-cu122 -``` - -## 1.2 量化格式 - -与 [**AutoAWQ**](https://llmc-zhcn.readthedocs.io/en/latest/backend/autoawq.html) 相同。 - - -## 1.3 使用LLMC量化模型 - - -### 1.3.1 校准数据 - -在本章节中,我们使用**Pileval**和**Wikitext**两个学术数据集作为校准数据,有关于校准数据的下载和预处理请参考[章节](https://llmc-zhcn.readthedocs.io/en/latest/configs.html)。 - -在实际使用中,建议应使用真实部署场景的数据进行离线量化校准。 - - -### 1.3.2 量化算法的选择 - - -**W4A16** - -在 W4A16 的量化设置下,我们建议使用 LLMC 中的 AWQ 算法。 - -具体实现可以参考 AWQ W4A16 的权重量化 [配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/mlcllm/awq_w4a16.yml) - -```yaml -# configs/quantization/backend/mlcllm/awq_w4a16.yml -quant: - method: Awq - weight: - bit: 4 - symmetric: True - granularity: per_group - group_size: 128 - pack_version: gemm_pack - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -``` - -请注意,此步骤中需要将 `pack_version` 参数设置为 `gemm_pack`,它表示将 int4 数据打包成 `torch.int32`。**MLC LLM** 支持加载由 **AutoAWQ** 的 `GEMM` 内核格式对应的整型权重。 - - -此外,如果 AWQ 无法满足精度需求,还可以尝试其他算法,例如 [GPTQ](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/mlcllm/gptq_w4a16.yml)。同时,我们建议使用[此章节](https://llmc-zhcn.readthedocs.io/en/latest/practice/awq_omni.html)中介绍的 **AWQ+OmniQuant 组合算法**,以进一步提升精度。我们也提供了相应的[配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/mlcllm/w4a16_combin)供参考。 - - - -### 1.3.3 真实量化模型导出 - -```yaml -save: - save_mlcllm: True - save_path: /path/to/save_for_mlcllm_awq_w4/ -``` -请注意,务必将 `save_mlcllm` 设置为 `True`。对于 **W4A16** 的量化设置,LLMC 会将权重打包为 `torch.int32` 形式导出,便于 **MLC LLM** 直接加载,并且会同时导出量化参数。 - - -### 1.3.4 运行LLMC - -修改运行脚本中的配置文件路径并运行: - -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=awq_for_mlcllm -config=${llmc}/configs/quantization/backend/mlcllm/awq_w4a16.yml -``` -等LLMC运行结束后,真实量化的模型就会存储在`save.save_path`路径 - -## 1.4 使用MLC LLM推理模型 - - -### 1.4.1 生成 MLC 配置 - -第一步是生成 **MLC LLM** 的配置文件。 - -```bash -export LOCAL_MODEL_PATH=/path/to/llama2-7b-chat/ # 本地模型存放路径 -export MLC_MODEL_PATH=./dist/llama2-7b-chat-MLC/ # 处理后模型的 MLC 存放路径 -export QUANTIZATION=q4f16_autoawq # 量化选项, LLMC目前只支持q4f16_autoawq格式的量化 -export CONV_TEMPLATE=llama-2 # 对话模板选项 - - -mlc_llm gen_config $LOCAL_MODEL_PATH \ - --quantization $QUANTIZATION \ - --conv-template $CONV_TEMPLATE \ - -o $MLC_MODEL_PATH -``` -配置生成命令接收本地模型路径、**MLC LLM** 输出的目标路径、**MLC LLM** 中的对话模板名称以及量化格式。这里的量化选项 `q4f16_autoawq` 表示使用 **AutoAWQ** 中的 `w4a16` 量化格式,而对话模板 `llama-2` 是 **MLC LLM** 中 `Llama-2` 模型的模板。 - - - -### 1.4.2 编译模型库 - -以下是在 **MLC LLM** 中编译模型库的示例命令: - -```bash -export MODEL_LIB=$MLC_MODEL_PATH/lib.so -mlc_llm compile $MLC_MODEL_PATH -o $MODEL_LIB -``` - -### 1.4.3 转换模型权重 - -在这一步,我们将模型权重转换为 **MLC LLM** 格式。 - -```bash -export LLMC_MODEL_PATH=/path/to/save_for_mlcllm_awq_w4/ #LLMC导出的真实量化模型 -mlc_llm convert_weight $LOCAL_MODEL_PATH \ - --quantization $QUANTIZATION \ - -o $MLC_MODEL_PATH \ - --source-format awq \ - --source $LLMC_MODEL_PATH/mlcllm_quant_model/model.safetensors - -``` -在上述模型转换过程中,将 `$LLMC_MODEL_PATH` 替换为 `save.save_path`,`--source-format` 表示 **LLMC** 传递给 **MLC LLM** 的是 **AutoAWQ** 格式的权重,而 `--source` 指的是 **LLMC** 导出的真实量化张量,即存储在 `save.save_path` 的权重张量。转换完成后的结果将存放在 **MLC LLM** 使用 `-o` 指定的输出路径,之后即可直接用于 **MLC LLM** 的推理。 - - -### 1.4.4 运行MLC LLM引擎 - -我们提供了一个运行 **MLC LLM** 引擎进行推理的[示例](https://github.com/ModelTC/llmc/blob/main/examples/backend/mlcllm/infer_with_mlcllm.py)。 - 
-将[示例](https://github.com/ModelTC/llmc/blob/main/examples/backend/mlcllm/infer_with_mlcllm.py)中的 `model_path` 替换为**MLC LLM** 的输出路径,然后运行以下命令即可完成推理: - -```bash -cd examples/backend/mlcllm - -python infer_with_mlcllm.py -``` diff --git a/docs/zh_cn/source/backend/sglang.md b/docs/zh_cn/source/backend/sglang.md deleted file mode 100644 index 72fd2279a..000000000 --- a/docs/zh_cn/source/backend/sglang.md +++ /dev/null @@ -1,228 +0,0 @@ -# Sglang量化推理 - -[Sglang](https://github.com/sgl-project/sglang) 是一个快速服务的大型语言模型和视觉语言模型框架。通过共同设计后端运行时和前端语言,它可以使你与模型的交互更加快速和可控。 - - -## 1.1 环境准备 - -要使用 Sglang 进行量化推理,首先需要安装并配置 Sglang 环境: -```bash -pip install --upgrade pip -pip install "sglang[all]" - -# Install FlashInfer CUDA kernels -pip install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.4/ -``` - -## 1.2 量化格式 - -与 [**VLLM**](https://llmc-zhcn.readthedocs.io/en/latest/backend/vllm.html) 相同。 - -## 1.3 使用LLMC量化模型 - - -### 1.3.1 校准数据 - -在本章节中,我们使用**Plieval**和**Wikitext**两个学术数据集作为校准数据,有关于校准数据的下载和预处理请参考[章节](https://llmc-zhcn.readthedocs.io/en/latest/configs.html)。 - -在实际使用中,建议应使用真实部署场景的数据进行离线量化校准。 - - -### 1.3.2 量化算法的选择 - -**W8A16** - -在 W8A16 的量化设置下,大语言模型的精度通常不会出现明显问题。在这种情况下,我们建议使用最简单的 RTN(Round to Nearest)算法,该算法不需要额外的校准步骤,运行速度较快。 - -具体实现可以参考 RTN W8A16 的权重量化 [配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/sglang/rtn_w8a16.yml) - -```yaml -# configs/quantization/backend/sglang/rtn_w8a16.yml -quant: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_group - group_size: 128 - need_pack: True -``` -请注意,在此步骤中需要将 `need_pack` 参数设置为 `True`, 这会将8-bit的权重`打包`为`torch.int32`的格式供Sglang直接加载推理。 - -**W4A16** - -在 W4A16 的量化设置下,RTN(Round to Nearest)不能保证精度无问题,因此需要使用一些高阶量化算法来维持模型的精度。在这种情况下,我们建议使用 **LLMC** 中的 AWQ 算法. - - -具体实现可以参考 AWQ W4A16 的权重量化 [配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/sglang/awq_w4a16.yml) - -```yaml -# configs/quantization/backend/sglang/awq_w4a16.yml -quant: - method: Awq - weight: - bit: 4 - symmetric: True - granularity: per_group - group_size: 128 - need_pack: True - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -``` -请注意,在此步骤中需要将 `need_pack` 参数设置为 `True`, 这会将4-bit的权重`打包`为`torch.int32`的格式存储,供**SGlang**直接加载推理。 - - -此外,如果 AWQ 无法满足精度需求,我们建议使用 [章节](https://llmc-zhcn.readthedocs.io/en/latest/practice/awq_omni.html)介绍的 **AWQ+OmniQuant 组合算法** 来进一步提升精度。在此也给出相应的[配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/sglang/w4a16_combin) - - -**W8A8** - -在 W8A8 的量化设置下,我们同样建议使用 AWQ 算法。AWQ 在大多数情况下的表现优于 SmoothQuant 和 OS+,能够提供更好的量化精度。 - -具体的实现可以参考 AWQ W8A8 的 [配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/sglang/awq_w8a8.yml)。 - -```yaml -# configs/quantization/backend/sglang/awq_w8a8.yml -quant: - method: Awq - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -``` - -此外,如果 AWQ 无法满足精度需求,我们建议使用 [章节](https://llmc-zhcn.readthedocs.io/en/latest/practice/quarot_gptq.html) 介绍的 **Quarot+GPTQ 组合算法** 来进一步提升精度。在此也给出相应的[配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/sglang/w8a8_combin) - -**FP8-Dynamic** - -在 FP8 的量化中,**LLMC** 支持权重per-channel,激活动态per-token的量化,在这种情况下,使用RTN(Round to Nearest)算法就足够了。然而,我们仍然建议使用AWQ算法以获得更好的量化精度。具体的实现可以参考AWQ FP8的[配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/vllm/fp8/awq_fp8.yml)。 - -```yaml -# 
configs/quantization/backend/vllm/fp8/awq_fp8.yml -quant: - method: Awq - quant_type: float_quant - weight: - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_channel - use_qtorch: True - act: - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_token - use_qtorch: True - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -``` - -请确保将 `quant_type` 设置为 `float_quant`,表示浮点量化。同时,将 `use_qtorch` 设置为 `True`,因为 `LLMC` 的浮点量化实现依赖 [QPyTorch](https://github.com/Tiiiger/QPyTorch) 库中的部分功能。 - -您可以使用以下命令来安装 [QPyTorch](https://github.com/Tiiiger/QPyTorch): - -```bash -pip install qtorch -``` - -**FP8-Static** - -在 FP8 的量化中,**LLMC** 同时也支持权重per-tensor,激活静态per-tensor的量化,在这种情况下,我们建议使用AWQ算法,调整下激活的范围,可以参考AWQ FP8静态量化的[配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/vllm/fp8/awq_fp8_static.yml)。 - -```yaml -# configs/quantization/backend/vllm/fp8/awq_fp8_static.yml -quant: - method: Awq - quant_type: float-quant - weight: - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_tensor - use_qtorch: True - act: - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_tensor - use_qtorch: True - static: True -``` - - -### 1.3.3 真实量化模型导出 - -```yaml -save: - save_sgl: True - save_path: /path/to/save_for_sglang_rtn_w8a16/ -``` -请注意,务必将 `save_sgl` 设置为 `True`。对于 **W4A16** 和 **W8A16** 的量化设置,LLMC 会将权重打包为 `torch.int32` 形式导出,便于 SGlang 直接加载,并且会同时导出量化参数。 - -对于 **W8A8** 的量化设置,LLMC 会将权重量化为 `torch.int8` 形式导出,便于 SGlang 直接加载,同时也会导出相关的量化参数。 - - -### 1.3.4 运行LLMC - -修改运行脚本中的配置文件路径并运行: - -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=rtn_for_sglang -config=${llmc}/configs/quantization/backend/sglang/rtn_w8a16.yml -``` -等LLMC运行结束后,真实量化的模型就会存储在`save.save_path`路径 - -## 1.4 使用Sglang推理模型 - -### 1.4.1 推理服务 - -默认情况下,它会在 http://localhost:10000 启动服务器。`model_path`替换成`save.save_path`路径下保存的`量化模型`即可。 - -启动服务: - -``` -python -m sglang.launch_server --model-path model_path -``` - -调用服务: - -``` -curl http://localhost:10000/generate \ - -H "Content-Type: application/json" \ - -d '{ - "text": "Once upon a time,", - "sampling_params": { - "max_new_tokens": 16, - "temperature": 0 - } - }' -``` - -同时,我们构建了一个使用 **Sglang** 进行推理的[示例](https://github.com/ModelTC/llmc/blob/main/examples/backend/sglang/infer_with_sglang.py)。 - -```bash -cd examples/backend/sglang - -python infer_with_sglang.py -``` \ No newline at end of file diff --git a/docs/zh_cn/source/backend/vllm.md b/docs/zh_cn/source/backend/vllm.md deleted file mode 100644 index d5e9bc6e6..000000000 --- a/docs/zh_cn/source/backend/vllm.md +++ /dev/null @@ -1,242 +0,0 @@ -# VLLM量化推理 - -[VLLM](https://github.com/vllm-project/vllm) 是一个专门为满足大规模语言模型推理需求设计的高效后端。它通过优化内存管理和计算效率,能够显著加速推理过程。 - -LLMC 支持导出 VLLM 所需的量化模型格式,并通过其强大的多算法支持(如 AWQ、GPTQ、QuaRot 等),能够在保证推理速度的同时保持较高的量化精度。通过 **LLMC** 和 **VLLM** 的结合,用户可以在不牺牲精度的情况下实现推理加速和内存优化,使其非常适合需要高效处理大规模语言模型的场景 - - - -## 1.1 环境准备 - -要使用 VLLM 进行量化推理,首先需要安装并配置 VLLM 环境: -```bash -pip install vllm -``` - -## 1.2 量化格式 - -在 **VLLM** 的定点整型量化中,支持以下几种常见格式: - -- **W4A16**:权重为 int4,激活为 float16; -- **W8A16**:权重为 int8,激活为 float16; -- **W8A8**:权重为 int8,激活为 int8; -- **FP8 (E4M3, E5M2)**:权重为 float8,激活为 float8; -- **权重 per-channel/group 量化**:按通道或按组进行量化; -- **权重 per-tensor 量化**:按tensor进行量化; -- **激活 per-token 动态量化**:针对每个 token 的动态量化方式,进一步提升量化精度。 -- **激活 per-tensor 静态量化**:针对每个 tensor 的静态量化方式,进一步提升效率。 -- **权重\激活对称量化**:量化参数包括scale; - -因此,在使用 **LLMC** 进行模型量化时,必须确保权重和激活的比特数设置为 VLLM 支持的格式。 - - -## 
1.3 使用LLMC量化模型 - - -### 1.3.1 校准数据 - -在本章节中,我们使用**Plieval**和**Wikitext**两个学术数据集作为校准数据,有关于校准数据的下载和预处理请参考[章节](https://llmc-zhcn.readthedocs.io/en/latest/configs.html)。 - -在实际使用中,建议应使用真实部署场景的数据进行离线量化校准。 - - -### 1.3.2 量化算法的选择 - -**W8A16** - -在 W8A16 的量化设置下,大语言模型的精度通常不会出现明显问题。在这种情况下,我们建议使用最简单的 RTN(Round to Nearest)算法,该算法不需要额外的校准步骤,运行速度较快。 - -具体实现可以参考 RTN W8A16 的权重量化 [配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/vllm/rtn_w8a16.yml) - -```yaml -# configs/quantization/backend/vllm/rtn_w8a16.yml -quant: - method: RTN - weight: - bit: 8 - symmetric: True - granularity: per_group - group_size: 128 - need_pack: True -``` -请注意,在此步骤中需要将 `need_pack` 参数设置为 `True`, 这会将8-bit的权重`打包`为`torch.int32`的格式供VLLM直接加载推理。 - -**W4A16** - -在 W4A16 的量化设置下,RTN(Round to Nearest)不能保证精度无问题,因此需要使用一些高阶量化算法来维持模型的精度。在这种情况下,我们建议使用 **LLMC** 中的 AWQ 算法. - - -具体实现可以参考 AWQ W4A16 的权重量化 [配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/vllm/awq_w4a16.yml) - -```yaml -# configs/quantization/backend/vllm/awq_w4a16.yml -quant: - method: Awq - weight: - bit: 4 - symmetric: True - granularity: per_group - group_size: 128 - need_pack: True - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -``` -请注意,在此步骤中需要将 `need_pack` 参数设置为 `True`, 这会将4-bit的权重`打包`为`torch.int32`的格式存储,供**VLLM**直接加载推理。 - - -此外,如果 AWQ 无法满足精度需求,我们建议使用 [章节](https://llmc-zhcn.readthedocs.io/en/latest/practice/awq_omni.html)介绍的 **AWQ+OmniQuant 组合算法** 来进一步提升精度。在此也给出相应的[配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/vllm/w4a16_combin) - - -**W8A8** - -在 W8A8 的量化设置下,我们同样建议使用 AWQ 算法。AWQ 在大多数情况下的表现优于 SmoothQuant 和 OS+,能够提供更好的量化精度。 - -具体的实现可以参考 AWQ W8A8 的 [配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/vllm/awq_w8a8.yml)。 - -```yaml -# configs/quantization/backend/vllm/awq_w8a8.yml -quant: - method: Awq - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -``` - -此外,如果 AWQ 无法满足精度需求,我们建议使用 [章节](https://llmc-zhcn.readthedocs.io/en/latest/practice/quarot_gptq.html) 介绍的 **Quarot+GPTQ 组合算法** 来进一步提升精度。在此也给出相应的[配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/vllm/w8a8_combin) - - -**FP8-Dynamic** - -在 FP8 的量化中,**LLMC** 支持权重per-channel,激活动态per-token的量化,在这种情况下,使用RTN(Round to Nearest)算法就足够了。然而,我们仍然建议使用AWQ算法以获得更好的量化精度。具体的实现可以参考AWQ FP8的[配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/vllm/fp8/awq_fp8.yml)。 - -```yaml -# configs/quantization/backend/vllm/fp8/awq_fp8.yml -quant: - method: Awq - quant_type: float_quant - weight: - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_channel - use_qtorch: True - act: - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_token - use_qtorch: True - special: - trans: True - trans_version: v2 - weight_clip: True - quant_out: True -``` - -请确保将 `quant_type` 设置为 `float_quant`,表示浮点量化。同时,将 `use_qtorch` 设置为 `True`,因为 `LLMC` 的浮点量化实现依赖 [QPyTorch](https://github.com/Tiiiger/QPyTorch) 库中的部分功能。 - -您可以使用以下命令来安装 [QPyTorch](https://github.com/Tiiiger/QPyTorch): - -```bash -pip install qtorch -``` - -**FP8-Static** - -在 FP8 的量化中,**LLMC** 同时也支持权重per-tensor,激活静态per-tensor的量化,在这种情况下,我们建议使用AWQ算法,调整下激活的范围,可以参考AWQ FP8静态量化的[配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend/vllm/fp8/awq_fp8_static.yml)。 - -```yaml -# 
configs/quantization/backend/vllm/fp8/awq_fp8_static.yml -quant: - method: Awq - quant_type: float-quant - weight: - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_tensor - use_qtorch: True - act: - # Support ["e4m3", "e5m2"] - bit: e4m3 - symmetric: True - granularity: per_tensor - use_qtorch: True - static: True -``` - -### 1.3.3 真实量化模型导出 - -```yaml -save: - save_vllm: True - save_path: /path/to/save_for_vllm_rtn_w8a16/ -``` -请注意,务必将 `save_vllm` 设置为 `True`。对于 **W4A16** 和 **W8A16** 的量化设置,LLMC 会将权重打包为 `torch.int32` 形式导出,便于 VLLM 直接加载,并且会同时导出量化参数。 - -对于 **W8A8** 的量化设置,LLMC 会将权重量化为 `torch.int8` 形式导出,便于 VLLM 直接加载,同时也会导出相关的量化参数。 - - -### 1.3.4 运行LLMC - -修改运行脚本中的配置文件路径并运行: - -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=rtn_for_vllm -config=${llmc}/configs/quantization/backend/vllm/rtn_w8a16.yml -``` -等LLMC运行结束后,真实量化的模型就会存储在`save.save_path`路径 - -## 1.4 使用VLLM推理模型 - -### 1.4.1 离线推理 - -我们构建了一个使用 **vLLM** 对数据集进行离线批量推理的[示例](https://github.com/ModelTC/llmc/blob/main/examples/backend/vllm/infer_with_vllm.py)。只需要将[示例](https://github.com/ModelTC/llmc/blob/main/examples/backend/vllm/infer_with_vllm.py) 中的 `model_path`替换为 `save.save_path` 路径,然后运行以下命令即可: - -```bash -cd examples/backend/vllm - -python infer_with_vllm.py -``` - - -### 1.4.2 推理服务 - -vLLM 可以作为一个实现 OpenAI API 协议的服务器进行部署。这使得 vLLM 可以作为使用 OpenAI API 的应用程序的即插即用替代方案。默认情况下,它会在 http://localhost:8000 启动服务器。你可以通过 --host 和 --port 参数来指定地址。`model_path`替换成保存的`量化模型`即可。 - -启动服务: - -``` -vllm serve model_path -``` - -调用服务: - -``` -curl http://localhost:8000/v1/completions \ --H "Content-Type: application/json" \ --d '{ - "model": "model_path", - "prompt": "What is the AI?", - "max_tokens": 128, - "temperature": 0 -}' -``` diff --git a/docs/zh_cn/source/conf.py b/docs/zh_cn/source/conf.py deleted file mode 100644 index f6ef270a4..000000000 --- a/docs/zh_cn/source/conf.py +++ /dev/null @@ -1,110 +0,0 @@ -# Configuration file for the Sphinx documentation builder (中文文档). 
-# ----------------------------------------------------------------------------- -# 参考 Lightx2v 样式,把原先 trojanzoo_sphinx_theme 改为 sphinx_book_theme, -# 并修正 logo 配置格式。 - -import os -import sys -from typing import List - -# -- Path setup -------------------------------------------------------------- -ROOT_DIR = os.path.abspath(os.path.join(__file__, "../../..")) -sys.path.append(ROOT_DIR) - -# -- 项目信息 --------------------------------------------------------------- -project = "llmc" -copyright = "2024, llmc contributors" -author = "ModelTC" -release = "1.0.0" - -# GitHub 信息 --------------------------------------------------------------- -github_url = "https://github.com/ModelTC/llmc" - -html_context = { - "display_github": True, - "github_user": author, - "github_repo": "llmc", - "github_version": "main", - "conf_py_path": "/docs/zh_cn/source/", # 文档根路径 -} - -# -- 通用配置 ---------------------------------------------------------------- -extensions = [ - "sphinx.ext.napoleon", - "sphinx.ext.viewcode", - "sphinx.ext.intersphinx", - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "myst_parser", - "sphinx_copybutton", - "sphinx.ext.doctest", - "sphinx.ext.mathjax", - "sphinx.ext.ifconfig", - "sphinx.ext.githubpages", - "sphinx.ext.autosectionlabel", - "sphinxcontrib.katex", - "sphinxcontrib.contentui", -] - -templates_path: List[str] = ["_templates"] -exclude_patterns: List[str] = [] - -language = "zh_CN" - -# 复制代码块时去除shell提示符 --------------------------------------------- -copybutton_prompt_text = r"\$ " -copybutton_prompt_is_regexp = True - -# -- HTML 输出选项 ----------------------------------------------------------- -html_title = project -html_theme = "sphinx_book_theme" -html_logo = "images/logo/llmc.svg" -html_static_path = ["_static"] - -html_theme_options = { - "path_to_docs": "docs/zh_cn/source", - "repository_url": github_url, - "use_repository_button": True, - "logo": { - "text": "LLMC", - "image_light": "images/logo/llmc.svg", - "image_dark": "images/logo/llmc.svg", - }, - "doc_items": { - "paper": "https://arxiv.org/abs/2405.06001", - "institution": "https://github.com/ModelTC", - }, -} - -# -- Intersphinx ------------------------------------------------------------- -intersphinx_mapping = { - "python": ("https://docs.python.org/3", {}), - "sphinx": ("https://www.sphinx-doc.org/en/master", {}), -} - -# -- Mock 外部依赖 ----------------------------------------------------------- -autodoc_mock_imports = [ - "torch", - "transformers", - "sentencepiece", - "tensorizer", -] - -# -- 自定义处理 ------------------------------------------------------------- -from sphinx.ext import autodoc # noqa: E402, isort: skip - -class MockedClassDocumenter(autodoc.ClassDocumenter): - """移除“Bases: object”行。""" - - def add_line(self, line: str, source: str, *lineno: int) -> None: - if line == " Bases: :py:class:`object`": - return - super().add_line(line, source, *lineno) - -autodoc.ClassDocumenter = MockedClassDocumenter - -# -- 额外钩子 --------------------------------------------------------------- - -def setup(app): - """可选的 Sphinx setup。""" - pass diff --git a/docs/zh_cn/source/configs.md b/docs/zh_cn/source/configs.md deleted file mode 100644 index 6d61b532d..000000000 --- a/docs/zh_cn/source/configs.md +++ /dev/null @@ -1,468 +0,0 @@ -# 配置的简要说明 - -所有的配置均可以在[这里](https://github.com/ModelTC/llmc/tree/main/configs)找到,具体地,包括[量化算法](https://github.com/ModelTC/llmc/tree/main/configs/quantization/methods),[量化实践以及方法组合技](https://github.com/ModelTC/llmc/tree/main/configs/quantization/combination), 
以及[推理后端](https://github.com/ModelTC/llmc/tree/main/configs/quantization/backend)相关的配置 - -下面的是一个简要的配置例子 - -``` -base: - seed: &seed 42 # 设置随机种子 -model: - type: model_type # 模型的类型 - path: model path # 模型的路径 - tokenizer_mode: fast # 模型的tokenizer类型 - torch_dtype: auto # 模型的dtype -calib: - name: pileval # 校准数据集名 - download: False # 校准数据集是否在线下载 - path: calib data path # 校准数据集路径 - n_samples: 512 # 校准数据集的数量 - bs: 1 # 校准数据集的batch size - seq_len: 512 # 校准数据集的长度 - preproc: pileval_smooth # 校准数据集的预处理方式 - seed: *seed # 校准数据集的随机种子 -eval: - eval_pos: [pretrain, transformed, fake_quant] # 评测的位点 - name: wikitext2 # 评测数据集的名字 - download: False # 评测数据集是否在线下载 - path: eval data path # 评测数据集的路径 - bs: 1 # 评测数据集的batch size - seq_len: 2048 # 评测数据集的长度 - eval_token_consist: False # 是否评测量化模型和原始模型输出token的一致性 -quant: - method: SmoothQuant # 压缩方法 - weight: - bit: 8 # 权重的量化bit数 - symmetric: True # 权重量化是否是对称量化 - granularity: per_channel # 权重量化的粒度 - act: - bit: 8 # 激活的量化bit数 - symmetric: True # 激活量化是否是对称量化 - granularity: per_token # 激活量化的粒度 - speical: # 量化算法需要的特殊参数,可参照每个算法的配置文件的注释以及原论文掌握其用法 -save: - save_vllm: False # 是否保存真实量化的模型,以供VLLM推理 - save_sgl: False # 是否保存真实量化的模型,以供Sglang推理 - save_autoawq: False # 是否保存真实量化的模型,以供AutoAWQ推理 - save_mlcllm: False # 是否保存真实量化的模型,以供MLC-LLM推理 - save_trans: False # 是否保存权重变换之后的模型 - save_fake: False # 是否保存伪量化的权重 - save_path: /path/to/save # 保存路径 -``` - -# 配置的详细说明 - -## base - - base.seed - -设置随机种子,用于整个框架的所有随机种子的设定 - -## model - - model.type - -模型的类型,可支持Llama,Qwen2,Llava,Gemma2等模型,可以从[这里](https://github.com/ModelTC/llmc/blob/main/llmc/models/__init__.py)查看llmc支持的所有模型 - - model.path - -模型的权重路径,llmc目前只支持hugging face格式的模型,可以用以下的代码检测是否可以正常load - -``` -from transformers import AutoModelForCausalLM, AutoConfig - - -model_path = # 模型的权重路径 -model_config = AutoConfig.from_pretrained( - model_path, trust_remote_code=True -) -model = AutoModelForCausalLM.from_pretrained( - model_path, - config=model_config, - trust_remote_code=True, - torch_dtype="auto", - low_cpu_mem_usage=True, -) - -print(model) -``` -如果上述代码不可以load你所给的模型,可能原因有 - -1. 你的模型格式并不是hugging face格式 - -2. 你的tansformers版本太低了,可以执行`pip install transformers --upgrade`升级 - -llmc运行之前确保上述代码能加载成功你的模型,否则llmc也无法加载你的模型 - - model.tokenizer_mode - -选择使用slow还是fast的tokenizer - - model.torch_dtype - -设置模型权重的数据类型,可以选择以下几种类型 - -1. auto - -2. torch.float16 - -3. torch.bfloat16 - -3. torch.float32 - -其中auto将跟随权重文件原本的数据类型设置 - - -## calib - - calib.name - -校准数据集的名称,目前支持以下几种校准数据集 - -1. pileval - -2. wikitext2 - -3. c4 - -4. ptb - -5. 
custom - -其中custom表示使用用户自定义的校准数据集,具体使用说明参考文档的进阶用法的[自定义校准数据集章节](https://llmc-zhcn.readthedocs.io/en/latest/advanced/custom_dataset.html) - - calib.download - -表示该校准数据集是否需要运行时在线下载 - -如果设置True,则无需设置calib.path,llmc会自动联网下载数据集 - -如果设置False,则需要设置calib.path,llmc会从该地址读取数据集,全程也无需联网运行llmc - - calib.path - -如果设置calib.download为False,则需要设置calib.path,表示存储校准数据集的路径 - -其中该路径存储的数据,需要是arrow格式的数据集 - -从hugging face下载arrow格式的数据集,可以使用以下代码 -``` -from datasets import load_dataset -calib_dataset = load_dataset(数据集名) -calib_dataset.save_to_disk(保存路径) -``` -加载该格式的数据集可以使用 -``` -from datasets import load_from_disk -data = load_from_disk(数据集路径) -``` -llmc已经提供了上述数据集的下载脚本 - -校准数据集可以在[这里](https://github.com/ModelTC/llmc/blob/main/tools/download_calib_dataset.py)下载 - -执行命令是`python download_calib_dataset.py --save_path [校准数据集保存路径]` - -测试数据集可以在[这里](https://github.com/ModelTC/llmc/blob/main/tools/download_eval_dataset.py)下载 - -执行命令是`python download_eval_dataset.py --save_path [测试数据集保存路径]` - -如果用户想用更多的数据集,就可以参考上面的arrow格式数据集的下载方式,自行修改 - - calib.n_samples - -选择n_samples条数据用于校准 - - calib.bs - -将校准数据以calib.bs为batch size进行打包,如果设置为-1,表示将全部数据打包成一个batch数据 - - calib.seq_len - -校准数据的长度 - - calib.preproc - -校准数据的预处理方式,目前llmc实现了多种预处理方式 - -1. wikitext2_gptq - -2. ptb_gptq - -3. c4_gptq - -4. pileval_awq - -5. pileval_smooth - -6. pileval_omni - -7. general - -8. random_truncate_txt - -除了general,其余预处理均可以在[这里](https://github.com/ModelTC/llmc/blob/main/llmc/data/dataset/specified_preproc.py)找到实现方式 - -general在[base_dataset](https://github.com/ModelTC/llmc/blob/main/llmc/data/dataset/base_dataset.py)中的general_preproc函数中实现 - - calib.seed - -数据预处理中的随机种子,默认跟随base.seed的设置 - - -## eval -llmc默认支持评测量化模型的困惑度(PPL), 以及量化模型和原始模型输出token的一致性。此外还支持通过harness和opencompass评测下游任务的精度(可见[评测章节v1](https://llmc-zhcn.readthedocs.io/en/latest/advanced/model_test_v1.md)和[v2](https://llmc-zhcn.readthedocs.io/en/latest/advanced/model_test_v2.md)) - - eval.eval_pos - -表示评测PPL的位点,目前支持三个位点可以被评测 - -1. pretrain - -2. transformed - -3. fake_quant - -eval_pos需要给一个列表,列表可以为空,空列表表示不进行测试 - - eval.name - -测试数据集的名称,目前支持以下几种测试数据集 - -1. wikitext2 - -2. c4 - -3. ptb - -测试数据集下载方式参考calib.name校准数据集 - - eval.download - -表示该测试据集是否需要运行时在线下载,参考calib.download - - eval.path - -参考calib.path - - eval.bs - -测试的batch size - - eval.seq_len - -测试的数据长度 - - eval.inference_per_block - -llmc仅支持单卡运行,如果你的模型太大,在测试的时候,单张卡的显存放不下整个模型,那么就需要打开inference_per_block,使用per block进行推理测试,同时在不爆显存的前提下,适当提高bs以提高推理速度 - -下面的是一个配置例子 -``` -bs: 10 -inference_per_block: True -``` - - 同时测试多个数据集 - -llmc也支持同时评测多个数据集的PPL - -下面是评测单个wikitext2数据集的例子 - -``` -eval: - name: wikitext2 - path: wikitext2的数据集路径 -``` - -下面是评测多个数据集的例子 - -``` -eval: - name: [wikitext2, c4, ptb] - path: 这几个数据集的共有上层目录 -``` - -需要注意的是,多个数据集评测的name需要以列表形式表示,同时需要遵循以下目录规则 - -- 共有上层目录 - - wikitext2 - - c4 - - ptb - -如果直接使用llmc的[下载脚本](https://github.com/ModelTC/llmc/blob/main/tools/download_eval_dataset.py),则共有上层目录就是`--save_path`所指定的数据集保存路径 - - eval.eval_token_consist - -表示是否评测量化模型和原始模型输出token的一致性,取值范围[0,1], 越接近1越说明量化模型的性能越接近原始模型 - -## quant - - quant.method - -使用的量化算法名,llmc支持的所有量化算法可以在[这里](https://github.com/ModelTC/llmc/blob/main/llmc/compression/quantization/__init__.py)查看 - - quant.weight - -权重的量化设置 - - quant.weight.bit - -权重的量化bit数 - - quant.weight.symmetric - -权重的量化对称与否 - - quant.weight.granularity - -权重的量化粒度,支持以下粒度 - -1. per_tensor - -2. per_channel - -3. 
- - quant.weight.ste - -Whether to use a straight-through estimator (STE) in the rounding step of weight quantization, so that the round function can propagate gradients for backpropagation - - quant.weight.calib - -The calibration method for the weights. minmax is used by default; llmc also supports learnable and mse, which may give better results - - - quant.act - -The quantization settings for the activations - - quant.act.bit - -The quantization bit width of the activations - - quant.act.symmetric - -Whether activation quantization is symmetric - - quant.act.granularity - -The quantization granularity of the activations; the following granularities are supported - -1. per_tensor - -2. per_token - -3. per_head - - quant.act.ste - -Whether to use a straight-through estimator (STE) in the rounding step of activation quantization, so that the round function can propagate gradients for backpropagation - - quant.act.calib - -The calibration method for the activations; minmax is used by default and is the only supported option - -If quant.method is set to RTN, activation quantization also supports a static per-tensor setting. Below are two configurations: static per-channel weight quantization with static per-tensor activation quantization, and static per-channel weight quantization with dynamic per-token 8-bit activation quantization - -``` -quant: - method: RTN - # static per-channel weight quantization - weight: - bit: 8 - symmetric: True - granularity: per_channel - - # static per-tensor activation quantization - act: - bit: 8 - symmetric: True - granularity: per_tensor - static: True -``` - -``` -quant: - method: RTN - # static per-channel weight quantization - weight: - bit: 8 - symmetric: True - granularity: per_channel - - # dynamic per-token activation quantization - act: - bit: 8 - symmetric: True - granularity: per_token -``` - -## sparse - - sparse.method - -The name of the sparsification algorithm to use. This covers both [model sparsification](https://github.com/ModelTC/LightCompress/blob/main/llmc/compression/sparsification/__init__.py) and [reduction](https://github.com/ModelTC/LightCompress/blob/main/llmc/compression/token_reduction/__init__.py) of visual tokens; all supported algorithms can be found in these files. - -Note that for model sparsification the concrete algorithm name must be specified, whereas for token reduction the method is first set to `TokenReduction` and the concrete algorithm is then specified under `special`. - -```yaml -sparse: - method: Wanda -``` - -```yaml -sparse: - method: TokenReduction - special: - method: FastV -``` - -## save - - save.save_vllm - -Whether to save a real-quantized model supported by the [VLLM](https://github.com/vllm-project/vllm) inference backend - -When this option is enabled, the saved model weights become significantly smaller (real quantization) and can be loaded directly by the VLLM backend for inference, which increases inference speed and reduces memory usage; see [this chapter](https://llmc-zhcn.readthedocs.io/en/latest/backend/vllm.html) for more on the [VLLM](https://github.com/vllm-project/vllm) inference backend - - save.save_sgl - -Whether to save a real-quantized model supported by the [Sglang](https://github.com/sgl-project/sglang) inference backend - -When this option is enabled, the saved model weights become significantly smaller (real quantization) and can be loaded directly by the [Sglang](https://github.com/sgl-project/sglang) backend for inference, which increases inference speed and reduces memory usage; see [this chapter](https://llmc-zhcn.readthedocs.io/en/latest/backend/sglang.html) for more on the [Sglang](https://github.com/sgl-project/sglang) inference backend - - - save.save_autoawq - -Whether to save a real-quantized model supported by the [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) inference backend - -When this option is enabled, the saved model weights become significantly smaller (real quantization) and can be loaded directly by the [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) backend for inference, which increases inference speed and reduces memory usage; see [this chapter](https://llmc-zhcn.readthedocs.io/en/latest/backend/autoawq.html) for more on the [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) inference backend - - save.save_mlcllm - -Whether to save a real-quantized model supported by the [MLC-LLM](https://github.com/mlc-ai/mlc-llm) inference backend - -When this option is enabled, the saved model weights become significantly smaller (real quantization) and can be loaded directly by the [MLC-LLM](https://github.com/mlc-ai/mlc-llm) backend for inference, which increases inference speed and reduces memory usage; see [this chapter](https://llmc-zhcn.readthedocs.io/en/latest/backend/mlcllm.html) for more on the [MLC-LLM](https://github.com/mlc-ai/mlc-llm) inference backend - - - save.save_trans - -Whether to save the transformed model weights - -The saved weights have been adjusted to be more quantization-friendly; they may contain fewer outliers and are still stored in fp16/bf16 (the weight files have the same size as the original model). When deploying with an inference engine, enable the engine's `naive quantization` feature to obtain quantized inference. - -Unlike `save_vllm` and the similar options, here the inference engine performs the real quantization, while `llmc` provides a floating-point model that is better suited to quantization. - -For example, the `save_trans` models exported by algorithms such as `SmoothQuant/Os+/AWQ/Quarot` contain `fewer outliers` and are therefore easier to quantize. - - save.save_fake - -Whether to save a fake-quantized model - - save.save_path - -The path where the model is saved. It must be a new directory that does not yet exist; otherwise llmc stops and reports a corresponding error diff --git a/docs/zh_cn/source/images/logo/llmc.svg b/docs/zh_cn/source/images/logo/llmc.svg deleted file mode 100644 index f1cb2bbf7..000000000 --- a/docs/zh_cn/source/images/logo/llmc.svg +++ /dev/null @@ -1,100 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/zh_cn/source/index.rst b/docs/zh_cn/source/index.rst deleted file mode 100644 index eeba71495..000000000 --- a/docs/zh_cn/source/index.rst +++ /dev/null @@ -1,57 +0,0 @@ -.. llmc documentation master file, created by - sphinx-quickstart on Mon Jun 24 10:56:49 2024. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -欢迎使用大模型压缩工具llmc! -================================ - -llmc是一个用于大模型压缩的工具,支持多种模型和多种压缩算法。 - -github链接: https://github.com/ModelTC/llmc - -arxiv链接: https://arxiv.org/abs/2405.06001 - -.. toctree:: - :maxdepth: 2 - :caption: 快速入门 - - quickstart.md - - -.. toctree:: - :maxdepth: 2 - :caption: 配置说明 - - configs.md - - -.. toctree:: - :maxdepth: 2 - :caption: 进阶用法 - - advanced/model_test_v1.md - advanced/model_test_v2.md - advanced/custom_dataset.md - advanced/Vit_quant&img_dataset.md - advanced/VLM_quant&img-txt_dataset.md - advanced/mix_bits.md - advanced/sparsification.md - advanced/token_reduction.md - -.. toctree:: - :maxdepth: 2 - :caption: 量化最佳实践 - - practice/awq.md - practice/awq_omni.md - practice/quarot_gptq.md - -.. toctree:: - :maxdepth: 2 - :caption: 量化推理后端 - - backend/vllm.md - backend/sglang.md - backend/autoawq.md - backend/mlcllm.md \ No newline at end of file diff --git a/docs/zh_cn/source/practice/awq.md b/docs/zh_cn/source/practice/awq.md deleted file mode 100644 index 32bc258cf..000000000 --- a/docs/zh_cn/source/practice/awq.md +++ /dev/null @@ -1,101 +0,0 @@ -# AWQ - -## 1.1 仅权重量化 - - -AWQ 在仅权重量化(weight-only quantization)的大多数情况下表现出色,但在`低比特`(尤其是 `2-bit`)量化时效果较差。这是因为 AWQ 无论是对称量化还是非对称量化,都采用了 `对称策略` 来截断权重。 - -在 LLMC 中,我们对 AWQ 方法进行了改进,将其`权重截断`的策略修改为了和`量化策略`保持一致,例如`非对称量化`使用`非对称截断`,`对称量化`使用`对称截断`, 获得了更优的结果,尤其是在低比特量化。 - -### 1.1.1 算法配置 - -具体实现可以参考 AWQ 的权重量化 [配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/methods/Awq/awq_w_only.yml) - -```yaml -# configs/quantization/methods/Awq/awq_w_only.yml -quant: - method: Awq - weight: - bit: 4 - symmetric: False - granularity: per_group - group_size: 128 - special: - trans: True - # The options for "trans_version" include "v1" and "v2". - # But their results don't differ significantly. 
- trans_version: v2 - weight_clip: True -``` - -### 1.1.2 算法运行 - -只需修改 [运行脚本](https://github.com/ModelTC/llmc/tree/main/scripts/run_llmc.sh) 中的配置文件路径,然后执行即可: - -运行脚本: -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=awq_w4a16 -config=${llmc}/configs/quantization/methods/Awq/awq_w_only.yml -``` - -通过这一改进,AWQ-LLMC 可以取得比于 [原始方法](https://github.com/mit-han-lab/llm-awq) 更好的精度表现,尤其在2-bit量化,表现出显著的改善。 - - -如果在 `config.quant.special` 中未指定 `clip_sym`,那么它的取值将与 `config.quant.weight.symmetric` 保持一致。如果想复现学术精度,可以将 `clip_sym` 写到config里并设置为 `True`: - -```yaml -quant: - special: - clip_sym: True -``` - - -## 1.2 权重-激活量化 - -此外,与原始方法不同,LLMC 中的 AWQ 还支持权重-激活量化。相比于 OS+ 和 SmoothQuant 仅支持对 `ln` 和 `fc` 层进行缩放变换,AWQ 提供了更多等价变换的位置选择。 - -同时,AWQ 通过网格搜索(Grid Search)寻找权重变换的最优缩放因子,因此在权重-激活量化方面通常能够取得更优异的效果。 - -### 1.2.1 算法配置 - -具体可以参考 AWQ 的权重-激活量化 [配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/methods/Awq/awq_w_a.yml) - -```yaml -# configs/quantization/methods/Awq/awq_w_a.yml -quant: - method: Awq - weight: - bit: 8 - symmetric: True - granularity: per_channel - group_size: -1 - act: - bit: 8 - symmetric: True - granularity: per_token - special: - trans: True - # The options for "trans_version" include "v1" and "v2". - trans_version: v2 - weight_clip: True -``` - -只需修改 [运行脚本](https://github.com/ModelTC/llmc/tree/main/scripts/run_llmc.sh) 中的配置文件路径,然后执行即可: - -### 1.2.2 算法运行 - -运行脚本: -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=awq_w8a8 -config=${llmc}/configs/quantization/methods/Awq/awq_w_a.yml -``` - -在权重-激活量化中,AWQ-LLMC 可以取得比SmoothQuant等算法更好的结果 diff --git a/docs/zh_cn/source/practice/awq_omni.md b/docs/zh_cn/source/practice/awq_omni.md deleted file mode 100644 index a9812691f..000000000 --- a/docs/zh_cn/source/practice/awq_omni.md +++ /dev/null @@ -1,142 +0,0 @@ -# AWQ + OmniQuant - -OmniQuant 使用 可学习的权重截断(Learnable Weight Clipping,`LWC`)和 可学习的等效变换(Learnable Equivalent Transformation,`LET`)来优化量化模型,与非基于学习的算法相比,往往能够获得更好的性能。然而,由于训练过程中的不稳定性以及对超参数的敏感性,OmniQuant 需要大量时间来精细调整超参数,这不仅增加了训练成本,还容易导致训练效果不佳。 - -为了解决这些问题,在 LLMC 中,我们对 OmniQuant 进行了改进。我们使用 AWQ 生成`截断参数`和`变换参数`,并将其分别作为 OmniQuant 中 `LWC` 和 `LET` 的初始化。事实证明,这种优质的初始化能够大幅缩短 OmniQuant 的训练时间,同时提升其精度表现。 - - -## 1.1 仅权重量化 - -以 `w4a16g128` 设置为例,我们提供了[AWQ 和 OmniQuant 的组合配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/combination/awq_comb_omni/w4a16g128)。 - - -### 1.1.1 运行AWQ - -**第一步**,运行与 AWQ 相关的[配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/combination/awq_comb_omni/w4a16g128/step_1_awq.yml)。请注意,在此步骤中需要将 `save_trans` 参数设置为 `True` 以保存经过变换的模型。 - -```yaml -# configs/quantization/combination/awq_comb_omni/w4a16g128/step_1_awq.yml - -save: - # Save the AWQ-transformed model for omniquant. 
- save_trans: True - save_fake: False - save_path: /path/to/save_awq_trans/ -``` -运行脚本: -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=step_1_awq -config=${llmc}/configs/quantization/combination/awq_comb_omni/w4a16g128/step_1_awq.yml -``` -### 1.1.2 运行OmniQuant - -**第二步**,加载AWQ变换过的模型并运行与 OmniQuant 相关的[配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/combination/awq_comb_omni/w4a16g128/step_2_omniq.yml)。 -请注意,在此步骤中需要将 `search_clip_init` 参数设置为 `True` 以使用AWQ网格搜索得到`截断参数`初始化`LWC`。 - -```yaml -# configs/quantization/combination/awq_comb_omni/w4a16g128/step_2_omniq.yml -model: - type: model_type - # Load AWQ-transformed model - path: /path/to/save_awq_trans/transformed_model - torch_dtype: auto -``` -```yaml -quant: - special: - search_clip_init: True -``` - -运行脚本: -```bash -# scripts/run_llmc.sh - -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=step_2_omni -config=${llmc}/configs/quantization/combination/awq_comb_omni/w4a16g128/step_2_omniq.yml -``` - -通过上述两个步骤的运行,LLMC 在 仅权重量化 的场景下,可以取得比 OmniQuant [原论文](https://arxiv.org/abs/2308.13137)更好的结果,更重要的是,LLMC 仅需 5 个 epoch 就能达到这一效果,远少于[原论文](https://arxiv.org/abs/2308.13137)中所需的 20或者40 个 epoch,大大减少了训练时间。 - -请注意, 在 仅权重量化 中,AWQ 的`截断参数`和`变换参数`不需要存储以供 OmniQuant 使用,只需保存一个经过变换的模型即可。这是因为 Learnable Equivalent Transformation (`LET`) 主要针对激活量化中的`异常值`(Outlier)现象,因此在仅权重量化中,OmniQuant 无需使用 `LET`。与此同时,使用 AWQ 的`截断参数`来初始化 Learnable Weight Clipping (`LWC`) 会在 LLMC 中的 OmniQuant 代码中自动完成。 - -## 1.2 权重-激活量化 - -以 `w8a8` 设置为例,我们提供了[AWQ 和 OmniQuant 的组合配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/combination/awq_comb_omni/w8a8)。 - - -### 1.2.1 运行AWQ - -**第一步**,运行与 AWQ 相关的[配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/combination/awq_comb_omni/w8a8/step_1_awq.yml)。请注意,在此步骤中需要将 `save_clip` 和`save_scale`参数设置为 `True` 以保存`截断参数`和`变换参数`。 注意,权重的校准方式要选择 learnable,因为只有 learnable方式得到的`截断参数`支持保存和加载。 - -```yaml -# configs/quantization/combination/awq_comb_omni/w8a8/step_1_awq.yml -quant: - weight: - bit: 8 - symmetric: False - granularity: per_channel - group_size: -1 - calib_algo: learnable - act: - bit: 8 - symmetric: False - granularity: per_token - calib_algo: minmax -``` - -```yaml -save: - save_scale: True - scale_path: /path/to/scale - save_clip: True - clip_path: /path/to/clip -``` - -运行脚本: -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=step_1_awq -config=${llmc}/configs/quantization/combination/awq_comb_omni/w8a8/step_1_awq.yml -``` - -### 1.2.2 运行OmniQuant - -**第二步**:加载 AWQ 生成的`截断参数`和`变换参数`, 在此步骤中,加载 AWQ 产出的`截断参数`和`变换参数`,以供 OmniQuant 中的 `LWC` 和 `LET` 进行初始化训练。运行与 OmniQuant 相关的[配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/combination/awq_comb_omni/w8a8/step_2_omniq.yml)。 - -```yaml -# configs/quantization/combination/awq_comb_omni/w8a8/step_2_omniq.yml -quant: - special: - # Use AWQ's search clip factors to initialize OmniQuant's clip factors, - # Then refine them through learning (LWC). - search_clip_init: True - load_clip: True - clip_path: /path/to/scale - # Use AWQ's search scale factors to initialize OmniQuant's scale factors, - # Then refine them through learning (LET). 
- search_scale_init: True - scale_path: /path/to/clip -``` - -请注意,在此步骤中需要将 `search_scale_init` 和 `search_clip_init` 参数设置为 `True`,以使用 AWQ 网格搜索得到的 `截断参数` 和 `变换参数` 初始化 `LWC` 和 `LET`。 - -运行脚本: -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=step_2_omniq -config=${llmc}/configs/quantization/combination/awq_comb_omni/w8a8/step_2_omniq.yml -``` -通过上述两个步骤的运行,LLMC 在 权重-激活量化 设置下可以取得比 [原论文](https://arxiv.org/abs/2308.13137)中更好的结果,且仅仅需要5个epoch。 diff --git a/docs/zh_cn/source/practice/quarot_gptq.md b/docs/zh_cn/source/practice/quarot_gptq.md deleted file mode 100644 index 2a0311a30..000000000 --- a/docs/zh_cn/source/practice/quarot_gptq.md +++ /dev/null @@ -1,65 +0,0 @@ -# QuaRot + GPTQ - - -## 1.1 权重-激活量化 - -QuaRot 旨在通过引入 `旋转矩阵`(如 `Hadamard 变换`)来优化大型语言模型的量化性能,使得模型的所有部分(包括权重、激活值)能够实现高效的 权重-激活量化。这种技术通过平滑激活值的分布,消除其中的`异常值`(Outliers),从而简化量化过程。 - -然而,由于 QuaRot 所使用的旋转矩阵具有随机性,其结果往往波动较大。为了解决这一问题,在 LLMC 中,我们可以采用 `QuaRot + GPTQ` 的组合策略。在施加 QuaRot 旋转后的权重上使用 GPTQ 重建量化输出,通过微调权重使得量化结果更加稳定和优异。(详细的分析见我们的[论文](https://arxiv.org/abs/2405.06001v2)) - -请注意,运行 QuaRot 需要 **Hadamard 变换 kernel** 的支持。此 kernel 的安装可以参考该 [仓库](https://github.com/spcl/QuaRot)。 - -### 1.1.1 运行Quarot - -**第一步**,运行与 Quarot 相关的[配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/combination/quarot_comb_gptq/w8a8/step_1_quarot.yml)。请注意,在此步骤中需要将 `save_trans` 参数设置为 `True` 以保存经过变换的模型。 - -```yaml -# configs/quantization/combination/quarot_comb_gptq/w8a8/step_1_quarot.yml - -save: - # Save the Quarot-transformed model. - save_trans: True - save_fake: False - save_path: /path/to/save_quarot_trans_for_gptq/ -``` -运行脚本: -```bash -# scripts/run_llmc.sh -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=step_1_quarot -config=${llmc}/configs/quantization/combination/quarot_comb_gptq/w8a8/step_1_quarot.yml -``` -### 1.1.2 运行GPTQ - -**第二步**,加载 Quarot 变换过的模型并运行与 GPTQ 相关的[配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/combination/quarot_comb_gptq/w8a8/step_2_gptq.yml)。 - -```yaml -# configs/quantization/combination/quarot_comb_gptq/w8a8/step_2_gptq.yml -model: - type: Llama - # Load Quarot-transformed model - path: /path/to/save_quarot_trans_for_gptq/transformed_model - torch_dtype: auto -``` - -运行脚本: -```bash -# scripts/run_llmc.sh - -llmc=llmc_path -export PYTHONPATH=$llmc:$PYTHONPATH - -task_name=step_2_gptq -config=${llmc}/configs/quantization/combination/quarot_comb_gptq/w8a8/step_2_gptq.yml - -``` -请注意,在 QuaRot 和 GPTQ 中都有 `online_rotate` 选项,务必确保两个配置文件中的该选项保持一致。该选项表示是否对激活进行在线旋转处理,这对提升精度有很大帮助,但不利于实际部署。有关在线旋转的详细说明,请参考[原 QuaRot 论文](https://arxiv.org/abs/2404.00456)。 -```yaml -quant: - special: - online_rotate: True -``` - -通过上述两个步骤的运行,LLMC 在 权重-激活量化 设置下可以取得比单独使用 Quarot 算法更好的结果 diff --git a/docs/zh_cn/source/quickstart.md b/docs/zh_cn/source/quickstart.md deleted file mode 100644 index 6d4854d12..000000000 --- a/docs/zh_cn/source/quickstart.md +++ /dev/null @@ -1,144 +0,0 @@ -# LLMC的安装 - -``` -git clone https://github.com/ModelTC/llmc.git -cd llmc/ -pip install -r requirements.txt -``` - -# 准备模型 - -**LLMC**目前仅支持`hugging face`格式的模型。以`Qwen2-0.5B`为例,可以在[这里](https://huggingface.co/Qwen/Qwen2-0.5B)找到模型。下载方式可以参考[这里](https://zhuanlan.zhihu.com/p/663712983) - -大陆地区用户还可以使用[hugging face镜像](https://hf-mirror.com/) - -一个简单的下载例子可以参考 -``` -pip install -U hf-transfer - -HF_ENDPOINT=https://hf-mirror.com HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download --resume-download Qwen/Qwen2-0.5B --local-dir Qwen2-0.5B -``` - -# 下载数据集 - 
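-If you prefer to fetch a dataset by hand rather than through the download scripts mentioned below, here is a minimal sketch (it assumes the `datasets` library is installed, uses wikitext2 as the example, and the save path is a placeholder): - -``` -from datasets import load_dataset - -# wikitext-2 (raw) is the usual source of the wikitext2 evaluation set; -# save_to_disk writes the arrow-format directory that eval.path / calib.path point to. -eval_dataset = load_dataset("wikitext", "wikitext-2-raw-v1") -eval_dataset.save_to_disk("./datasets/eval/wikitext2") -``` - 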
-**LLMC**需要的数据集可以分为`校准数据集`和`测试数据集`。`校准数据集`可以在[这里](https://github.com/ModelTC/llmc/blob/main/tools/download_calib_dataset.py)下载,`测试数据`集可以在[这里](https://github.com/ModelTC/llmc/blob/main/tools/download_eval_dataset.py)下载 - -当然**LLMC**也支持在线下载数据集,只需要在`config`中的`download`设置为True即可。 - -```yaml -calib: - name: pileval - download: True -``` - -# 设置配置文件 - -所有的`配置文件`都在[这里](https://github.com/ModelTC/llmc/blob/main/configs/)可以找到,同时关于`配置文件`的说明请参考[此章节](https://llmc-zhcn.readthedocs.io/en/latest/configs.html) -以SmoothQuant为例,`config`在[这里](https://github.com/ModelTC/llmc/blob/main/configs/quantization/methods/SmoothQuant/smoothquant_w_a.yml) - -```yaml -base: - seed: &seed 42 -model: - type: Qwen2 # 设置模型名,可支持Llama,Qwen2,Llava,Gemma2等模型 - path: # 设置模型权重路径 - torch_dtype: auto -calib: - name: pileval - download: False - path: # 设置校准数据集路径 - n_samples: 512 - bs: 1 - seq_len: 512 - preproc: pileval_smooth - seed: *seed -eval: - eval_pos: [pretrain, transformed, fake_quant] - name: wikitext2 - download: False - path: # 设置测试数据集路径 - bs: 1 - seq_len: 2048 -quant: - method: SmoothQuant - weight: - bit: 8 - symmetric: True - granularity: per_channel - act: - bit: 8 - symmetric: True - granularity: per_token -save: - save_vllm: True # 当设置为True时,可以保存真实量化的整型模型,并通过VLLM推理引擎进行推理 - save_trans: False # 当设置为True,可以保存下调整之后的浮点权重 - save_path: ./save -``` -有关于`save`的更多选项和说明,请参照[此章节](https://llmc-zhcn.readthedocs.io/en/latest/configs.html) - - -**LLMC**在`configs/quantization/methods`路径下,提供了很多的[算法配置文件](https://github.com/ModelTC/llmc/tree/main/configs/quantization/methods)供大家参考。 - -# 开始运行 - -**LLMC**无需安装,只需在[运行脚本](https://github.com/ModelTC/llmc/blob/main/scripts/run_llmc.sh)中将`/path/to/llmc`修改为**LLMC**的`本地路径`即可。 -```bash -llmc=/path/to/llmc -export PYTHONPATH=$llmc:$PYTHONPATH -``` - -根据你想运行的算法,需相应修改[运行脚本](https://github.com/ModelTC/llmc/blob/main/scripts/run_llmc.sh)中的配置路径。例如,`${llmc}/configs/quantization/methods/SmoothQuant/smoothquant_w_a.yml`对应的是 SmoothQuant 量化的配置文件。`task_name`用于指定**LLMC**运行时生成的`日志文件名称`。 - -```bash -task_name=smooth_w_a -config=${llmc}/configs/quantization/methods/SmoothQuant/smoothquant_w_a.yml -``` - -当在运行脚本中,修改完相应的LLMC路径和config路径后,运行即可: - -```bash -bash run_llmc.sh -``` - -# 量化推理 - -假设你在配置文件中指定了保存`真实量化`模型的选项,例如 `save_vllm: True`,那么保存的`真实量化模型`即可直接用于对应的`推理后端`执行,具体可参照[文档](https://llmc-zhcn.readthedocs.io/en/latest)的`量化推理后端`章节。 - -# 常见问题 - -** 问题1 ** - -ValueError: Tokenizer class xxx does not exist or is not currently imported. - -** 解决方法 ** - -pip install transformers --upgrade - -** 问题2 ** - -下载数据集卡住,下载不下来 - -** 解决方法 ** - -大陆地区可能需要在vpn环境下才能正常访问hugging face的数据集 - -** 问题3 ** - -如果运行的是一个很大的模型,单卡显存放不下整个模型,那么eval的时候,会爆显存 - -** 解决方法 ** - -使用per block进行推理测试,打开inference_per_block,在不爆显存的前提下,适当提高bs以提高推理速度 -``` -bs: 10 -inference_per_block: True -``` - -** 问题4 ** - -Exception: ./save/transformed_model existed before. Need check. 
- -** 解决方法 ** - -保存的路径是一个已经存在的目录,需要换个不存在的保存目录 - diff --git a/examples/backend/autoawq/infer_with_autoawq.py b/examples/backend/autoawq/infer_with_autoawq.py deleted file mode 100644 index a157f0620..000000000 --- a/examples/backend/autoawq/infer_with_autoawq.py +++ /dev/null @@ -1,34 +0,0 @@ - - -import sys - -autoawq_path = '/path/to/AutoAWQ' -sys.path.append(autoawq_path) - -import torch -from awq import AutoAWQForCausalLM -from transformers import AutoTokenizer, TextStreamer - -model_path = '/path/to/save_for_autoawq_awq_w4/autoawq_quant_model' - -tokenizer = AutoTokenizer.from_pretrained(model_path) -streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) - -model = AutoAWQForCausalLM.from_quantized( - model_path, - torch_dtype=torch.float16, - low_cpu_mem_usage=True, - device_map='auto', -) - - -prompt_text = 'The president of the United States is ' -inputs = tokenizer(prompt_text, return_tensors='pt').to('cuda') - -outputs = model.generate( - **inputs, - do_sample=False, - max_new_tokens=100, - streamer=streamer, - eos_token_id=[tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids('<|eot_id|>')] -) diff --git a/examples/backend/mlcllm/infer_with_mlcllm.py b/examples/backend/mlcllm/infer_with_mlcllm.py deleted file mode 100644 index be9523aa7..000000000 --- a/examples/backend/mlcllm/infer_with_mlcllm.py +++ /dev/null @@ -1,17 +0,0 @@ -from mlc_llm import MLCEngine - -# Create engine -model_path = './dist/llama2-7b-chat-MLC/' -engine = MLCEngine(model_path) - -# Run chat completion in OpenAI API. -for response in engine.chat.completions.create( - messages=[{'role': 'user', 'content': 'What is the meaning of life?'}], - model=model_path, - stream=True, -): - for choice in response.choices: - print(choice.delta.content, end='', flush=True) -print('\n') - -engine.terminate() diff --git a/examples/backend/sglang/infer_with_sglang.py b/examples/backend/sglang/infer_with_sglang.py deleted file mode 100644 index 2a92b807c..000000000 --- a/examples/backend/sglang/infer_with_sglang.py +++ /dev/null @@ -1,13 +0,0 @@ -import openai - -client = openai.Client( - base_url='http://127.0.0.1:10000/v1', api_key='EMPTY') - -# Text completion -response = client.completions.create( - model='default', - prompt='The president of the United States is', - temperature=0, - max_tokens=32, -) -print(response) diff --git a/examples/backend/vllm/infer_with_vllm.py b/examples/backend/vllm/infer_with_vllm.py deleted file mode 100644 index 8b77349d1..000000000 --- a/examples/backend/vllm/infer_with_vllm.py +++ /dev/null @@ -1,21 +0,0 @@ -from transformers import AutoTokenizer -from vllm import LLM, SamplingParams - -model_path = '/path/to/save_for_vllm_awq_w4/real_quant_model' -model = LLM(model_path) -tokenizer = AutoTokenizer.from_pretrained(model_path) - -prompts = [ - 'Hello, my name is', - 'The president of the United States is', - 'The capital of France is', - 'The future of AI is', -] -sampling_params = SamplingParams(temperature=0.8, top_p=0.95) - -outputs = model.generate(prompts, sampling_params) - -for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - print(f'Prompt: {prompt!r}, Generated text: {generated_text!r}') diff --git a/examples/lm-eval-overview.ipynb b/examples/lm-eval-overview.ipynb new file mode 100644 index 000000000..7c4564d6a --- /dev/null +++ b/examples/lm-eval-overview.ipynb @@ -0,0 +1,1230 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "Qw83KAePAhaS" + }, + "source": [ + "# Releasing 
LM-Evaluation-Harness v0.4.0" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Z7k2vq1iAdqr" + }, + "source": [ + "With the vast amount of work done in the field today, it helps to have a tool that people can use easily to share their results and use to check others to ensure reported numbers are valid. The LM Evaluation Harness is one such tool the community has used extensively. We want to continue to support the community and with that in mind, we’re excited to announce a major update on the LM Evaluation Harness to further our goal for open and accessible AI research." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0gDoM0AJAvEc" + }, + "source": [ + "Our refactor stems from our desires to make the following believed best practices easier to carry out. \n", + "\n", + "1. Never copy results from other papers\n", + "2. Always share your exact prompts\n", + "3. Always provide model outputs\n", + "4. Qualitatively review a small batch of outputs before running evaluation jobs at scale\n", + "\n", + "We also wanted to make the library a better experience to use and to contribute or design evaluations within. New features in the new release that serve this purpose include:\n", + "\n", + "1. Faster Evaluation Runtimes (accelerated data-parallel inference with HF Transformers + Accelerate, and commonly used or faster inference libraries such as vLLM and Llama-CPP)\n", + "2. Easier addition and sharing of new tasks (YAML-based task config formats, allowing single-file sharing of custom tasks)\n", + "3. More configurability, for more advanced workflows and easier operation with modifying prompts\n", + "4. Better logging of data at runtime and post-hoc" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nnwsOpjda_YW" + }, + "source": [ + "In this notebook we will be going through a short tutorial on how things work." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zAov81vTbL2K" + }, + "source": [ + "## Install LM-Eval" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "8hiosGzq_qZg", + "outputId": "6ab73e5e-1f54-417e-a388-07e0d870b132" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Collecting git+https://github.com/EleutherAI/lm-evaluation-harness.git@big-refactor\n", + " Cloning https://github.com/EleutherAI/lm-evaluation-harness.git (to revision big-refactor) to /tmp/pip-req-build-tnssql5s\n", + " Running command git clone --filter=blob:none --quiet https://github.com/EleutherAI/lm-evaluation-harness.git /tmp/pip-req-build-tnssql5s\n", + " Running command git checkout -b big-refactor --track origin/big-refactor\n", + " Switched to a new branch 'big-refactor'\n", + " Branch 'big-refactor' set up to track remote branch 'big-refactor' from 'origin'.\n", + " Resolved https://github.com/EleutherAI/lm-evaluation-harness.git to commit 42f486ee49b65926a444cb0620870a39a5b4b0a8\n", + " Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n", + " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n", + " Preparing metadata (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\n", + "Collecting accelerate>=0.21.0 (from lm-eval==1.0.0)\n", + " Downloading accelerate-0.24.1-py3-none-any.whl (261 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m261.4/261.4 kB\u001b[0m \u001b[31m4.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting evaluate (from lm-eval==1.0.0)\n", + " Downloading evaluate-0.4.1-py3-none-any.whl (84 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m84.1/84.1 kB\u001b[0m \u001b[31m5.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting datasets>=2.0.0 (from lm-eval==1.0.0)\n", + " Downloading datasets-2.15.0-py3-none-any.whl (521 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m521.2/521.2 kB\u001b[0m \u001b[31m9.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting jsonlines (from lm-eval==1.0.0)\n", + " Downloading jsonlines-4.0.0-py3-none-any.whl (8.7 kB)\n", + "Requirement already satisfied: numexpr in /usr/local/lib/python3.10/dist-packages (from lm-eval==1.0.0) (2.8.7)\n", + "Collecting peft>=0.2.0 (from lm-eval==1.0.0)\n", + " Downloading peft-0.6.2-py3-none-any.whl (174 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m174.7/174.7 kB\u001b[0m \u001b[31m7.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting pybind11>=2.6.2 (from lm-eval==1.0.0)\n", + " Downloading pybind11-2.11.1-py3-none-any.whl (227 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m227.7/227.7 kB\u001b[0m \u001b[31m12.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting pytablewriter (from lm-eval==1.0.0)\n", + " Downloading pytablewriter-1.2.0-py3-none-any.whl (111 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m111.1/111.1 kB\u001b[0m \u001b[31m8.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting rouge-score>=0.0.4 (from lm-eval==1.0.0)\n", + " Downloading rouge_score-0.1.2.tar.gz (17 kB)\n", + " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + "Collecting sacrebleu>=1.5.0 (from lm-eval==1.0.0)\n", + " Downloading sacrebleu-2.3.2-py3-none-any.whl (119 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m119.7/119.7 kB\u001b[0m \u001b[31m8.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: scikit-learn>=0.24.1 in /usr/local/lib/python3.10/dist-packages (from lm-eval==1.0.0) (1.2.2)\n", + "Collecting sqlitedict (from lm-eval==1.0.0)\n", + " Downloading sqlitedict-2.1.0.tar.gz (21 kB)\n", + " Preparing metadata (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", + "Requirement already satisfied: torch>=1.8 in /usr/local/lib/python3.10/dist-packages (from lm-eval==1.0.0) (2.1.0+cu118)\n", + "Collecting tqdm-multiprocess (from lm-eval==1.0.0)\n", + " Downloading tqdm_multiprocess-0.0.11-py3-none-any.whl (9.8 kB)\n", + "Requirement already satisfied: transformers>=4.1 in /usr/local/lib/python3.10/dist-packages (from lm-eval==1.0.0) (4.35.2)\n", + "Collecting zstandard (from lm-eval==1.0.0)\n", + " Downloading zstandard-0.22.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (5.4 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.4/5.4 MB\u001b[0m \u001b[31m29.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from accelerate>=0.21.0->lm-eval==1.0.0) (1.23.5)\n", + "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from accelerate>=0.21.0->lm-eval==1.0.0) (23.2)\n", + "Requirement already satisfied: psutil in /usr/local/lib/python3.10/dist-packages (from accelerate>=0.21.0->lm-eval==1.0.0) (5.9.5)\n", + "Requirement already satisfied: pyyaml in /usr/local/lib/python3.10/dist-packages (from accelerate>=0.21.0->lm-eval==1.0.0) (6.0.1)\n", + "Requirement already satisfied: huggingface-hub in /usr/local/lib/python3.10/dist-packages (from accelerate>=0.21.0->lm-eval==1.0.0) (0.19.4)\n", + "Requirement already satisfied: pyarrow>=8.0.0 in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (9.0.0)\n", + "Collecting pyarrow-hotfix (from datasets>=2.0.0->lm-eval==1.0.0)\n", + " Downloading pyarrow_hotfix-0.6-py3-none-any.whl (7.9 kB)\n", + "Collecting dill<0.3.8,>=0.3.0 (from datasets>=2.0.0->lm-eval==1.0.0)\n", + " Downloading dill-0.3.7-py3-none-any.whl (115 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m115.3/115.3 kB\u001b[0m \u001b[31m14.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (1.5.3)\n", + "Requirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (2.31.0)\n", + "Requirement already satisfied: tqdm>=4.62.1 in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (4.66.1)\n", + "Requirement already satisfied: xxhash in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (3.4.1)\n", + "Collecting multiprocess (from datasets>=2.0.0->lm-eval==1.0.0)\n", + " Downloading multiprocess-0.70.15-py310-none-any.whl (134 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m134.8/134.8 kB\u001b[0m \u001b[31m19.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: fsspec[http]<=2023.10.0,>=2023.1.0 in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (2023.6.0)\n", + "Requirement already satisfied: aiohttp in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (3.8.6)\n", + "Collecting responses<0.19 (from evaluate->lm-eval==1.0.0)\n", + " Downloading responses-0.18.0-py3-none-any.whl (38 kB)\n", + "Requirement already satisfied: safetensors in /usr/local/lib/python3.10/dist-packages (from peft>=0.2.0->lm-eval==1.0.0) (0.4.0)\n", + "Requirement already satisfied: absl-py in 
/usr/local/lib/python3.10/dist-packages (from rouge-score>=0.0.4->lm-eval==1.0.0) (1.4.0)\n", + "Requirement already satisfied: nltk in /usr/local/lib/python3.10/dist-packages (from rouge-score>=0.0.4->lm-eval==1.0.0) (3.8.1)\n", + "Requirement already satisfied: six>=1.14.0 in /usr/local/lib/python3.10/dist-packages (from rouge-score>=0.0.4->lm-eval==1.0.0) (1.16.0)\n", + "Collecting portalocker (from sacrebleu>=1.5.0->lm-eval==1.0.0)\n", + " Downloading portalocker-2.8.2-py3-none-any.whl (17 kB)\n", + "Requirement already satisfied: regex in /usr/local/lib/python3.10/dist-packages (from sacrebleu>=1.5.0->lm-eval==1.0.0) (2023.6.3)\n", + "Requirement already satisfied: tabulate>=0.8.9 in /usr/local/lib/python3.10/dist-packages (from sacrebleu>=1.5.0->lm-eval==1.0.0) (0.9.0)\n", + "Collecting colorama (from sacrebleu>=1.5.0->lm-eval==1.0.0)\n", + " Downloading colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n", + "Requirement already satisfied: lxml in /usr/local/lib/python3.10/dist-packages (from sacrebleu>=1.5.0->lm-eval==1.0.0) (4.9.3)\n", + "Requirement already satisfied: scipy>=1.3.2 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.24.1->lm-eval==1.0.0) (1.11.3)\n", + "Requirement already satisfied: joblib>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.24.1->lm-eval==1.0.0) (1.3.2)\n", + "Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.24.1->lm-eval==1.0.0) (3.2.0)\n", + "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch>=1.8->lm-eval==1.0.0) (3.13.1)\n", + "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch>=1.8->lm-eval==1.0.0) (4.5.0)\n", + "Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch>=1.8->lm-eval==1.0.0) (1.12)\n", + "Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch>=1.8->lm-eval==1.0.0) (3.2.1)\n", + "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch>=1.8->lm-eval==1.0.0) (3.1.2)\n", + "Requirement already satisfied: triton==2.1.0 in /usr/local/lib/python3.10/dist-packages (from torch>=1.8->lm-eval==1.0.0) (2.1.0)\n", + "Requirement already satisfied: tokenizers<0.19,>=0.14 in /usr/local/lib/python3.10/dist-packages (from transformers>=4.1->lm-eval==1.0.0) (0.15.0)\n", + "Requirement already satisfied: attrs>=19.2.0 in /usr/local/lib/python3.10/dist-packages (from jsonlines->lm-eval==1.0.0) (23.1.0)\n", + "Requirement already satisfied: setuptools>=38.3.0 in /usr/local/lib/python3.10/dist-packages (from pytablewriter->lm-eval==1.0.0) (67.7.2)\n", + "Collecting DataProperty<2,>=1.0.1 (from pytablewriter->lm-eval==1.0.0)\n", + " Downloading DataProperty-1.0.1-py3-none-any.whl (27 kB)\n", + "Collecting mbstrdecoder<2,>=1.0.0 (from pytablewriter->lm-eval==1.0.0)\n", + " Downloading mbstrdecoder-1.1.3-py3-none-any.whl (7.8 kB)\n", + "Collecting pathvalidate<4,>=2.3.0 (from pytablewriter->lm-eval==1.0.0)\n", + " Downloading pathvalidate-3.2.0-py3-none-any.whl (23 kB)\n", + "Collecting tabledata<2,>=1.3.1 (from pytablewriter->lm-eval==1.0.0)\n", + " Downloading tabledata-1.3.3-py3-none-any.whl (11 kB)\n", + "Collecting tcolorpy<1,>=0.0.5 (from pytablewriter->lm-eval==1.0.0)\n", + " Downloading tcolorpy-0.1.4-py3-none-any.whl (7.9 kB)\n", + "Collecting typepy[datetime]<2,>=1.3.2 (from pytablewriter->lm-eval==1.0.0)\n", + " Downloading 
typepy-1.3.2-py3-none-any.whl (31 kB)\n", + "Requirement already satisfied: charset-normalizer<4.0,>=2.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets>=2.0.0->lm-eval==1.0.0) (3.3.2)\n", + "Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets>=2.0.0->lm-eval==1.0.0) (6.0.4)\n", + "Requirement already satisfied: async-timeout<5.0,>=4.0.0a3 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets>=2.0.0->lm-eval==1.0.0) (4.0.3)\n", + "Requirement already satisfied: yarl<2.0,>=1.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets>=2.0.0->lm-eval==1.0.0) (1.9.2)\n", + "Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets>=2.0.0->lm-eval==1.0.0) (1.4.0)\n", + "Requirement already satisfied: aiosignal>=1.1.2 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets>=2.0.0->lm-eval==1.0.0) (1.3.1)\n", + "Requirement already satisfied: chardet<6,>=3.0.4 in /usr/local/lib/python3.10/dist-packages (from mbstrdecoder<2,>=1.0.0->pytablewriter->lm-eval==1.0.0) (5.2.0)\n", + "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets>=2.0.0->lm-eval==1.0.0) (3.4)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets>=2.0.0->lm-eval==1.0.0) (2.0.7)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets>=2.0.0->lm-eval==1.0.0) (2023.7.22)\n", + "Requirement already satisfied: python-dateutil<3.0.0,>=2.8.0 in /usr/local/lib/python3.10/dist-packages (from typepy[datetime]<2,>=1.3.2->pytablewriter->lm-eval==1.0.0) (2.8.2)\n", + "Requirement already satisfied: pytz>=2018.9 in /usr/local/lib/python3.10/dist-packages (from typepy[datetime]<2,>=1.3.2->pytablewriter->lm-eval==1.0.0) (2023.3.post1)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch>=1.8->lm-eval==1.0.0) (2.1.3)\n", + "Requirement already satisfied: click in /usr/local/lib/python3.10/dist-packages (from nltk->rouge-score>=0.0.4->lm-eval==1.0.0) (8.1.7)\n", + "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch>=1.8->lm-eval==1.0.0) (1.3.0)\n", + "Building wheels for collected packages: lm-eval, rouge-score, sqlitedict\n", + " Building wheel for lm-eval (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for lm-eval: filename=lm_eval-1.0.0-py3-none-any.whl size=994254 sha256=88356155b19f2891981ecef948326ad6ce8ca40a6009378410ec20d0e225995a\n", + " Stored in directory: /tmp/pip-ephem-wheel-cache-9v6ye7h3/wheels/17/01/26/599c0779e9858a70a73fa8a306699b5b9a868f820c225457b0\n", + " Building wheel for rouge-score (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for rouge-score: filename=rouge_score-0.1.2-py3-none-any.whl size=24933 sha256=6bb0d44e4881972c43ce194e7cb65233d309758cb15f0dec54590d3d2efcfc36\n", + " Stored in directory: /root/.cache/pip/wheels/5f/dd/89/461065a73be61a532ff8599a28e9beef17985c9e9c31e541b4\n", + " Building wheel for sqlitedict (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", + " Created wheel for sqlitedict: filename=sqlitedict-2.1.0-py3-none-any.whl size=16863 sha256=5747f7dd73ddf3d8fbcebf51b5e4f718fabe1e94bccdf16d2f22a2e65ee7fdf4\n", + " Stored in directory: /root/.cache/pip/wheels/79/d6/e7/304e0e6cb2221022c26d8161f7c23cd4f259a9e41e8bbcfabd\n", + "Successfully built lm-eval rouge-score sqlitedict\n", + "Installing collected packages: sqlitedict, zstandard, tcolorpy, pybind11, pyarrow-hotfix, portalocker, pathvalidate, mbstrdecoder, jsonlines, dill, colorama, typepy, tqdm-multiprocess, sacrebleu, rouge-score, responses, multiprocess, accelerate, datasets, DataProperty, tabledata, peft, evaluate, pytablewriter, lm-eval\n", + "Successfully installed DataProperty-1.0.1 accelerate-0.24.1 colorama-0.4.6 datasets-2.15.0 dill-0.3.7 evaluate-0.4.1 jsonlines-4.0.0 lm-eval-1.0.0 mbstrdecoder-1.1.3 multiprocess-0.70.15 pathvalidate-3.2.0 peft-0.6.2 portalocker-2.8.2 pyarrow-hotfix-0.6 pybind11-2.11.1 pytablewriter-1.2.0 responses-0.18.0 rouge-score-0.1.2 sacrebleu-2.3.2 sqlitedict-2.1.0 tabledata-1.3.3 tcolorpy-0.1.4 tqdm-multiprocess-0.0.11 typepy-1.3.2 zstandard-0.22.0\n" + ] + } + ], + "source": [ + "# Install LM-Eval\n", + "!pip install git+https://github.com/EleutherAI/lm-evaluation-harness.git" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 0, + "referenced_widgets": [ + "a1d3a8aa016544a78e8821c8f6199e06", + "f61ed33fad754146bdd2ac9db1ba1c48", + "bfa0af6aeff344c6845e1080a878e92e", + "fd1ad9e0367d4004aae853b91c3a7617", + "6b2d90209ec14230b3d58a74ac9b83bf", + "a73f357065d34d7baf0453ae4a8d75e2", + "46f521b73fd943c081c648fd873ebc0a", + "7c5689bc13684db8a22681f41863dddd", + "48763b6233374554ae76035c0483066f", + "4986a21eb560448fa79f4b25cde48951", + "aed3acd2f2d74003b44079c333a0698e" + ] + }, + "id": "uyO5MaKkZyah", + "outputId": "d46e8096-5086-4e49-967e-ea33d4a2a335" + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "a1d3a8aa016544a78e8821c8f6199e06", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Downloading builder script: 0%| | 0.00/5.67k [00:00\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "id": "fthNg3ywO-kA" + }, + "outputs": [], + "source": [ + "YAML_cola_string = '''\n", + "tag: yes_or_no_tasks\n", + "task: demo_cola\n", + "dataset_path: glue\n", + "dataset_name: cola\n", + "output_type: multiple_choice\n", + "training_split: train\n", + "validation_split: validation\n", + "doc_to_text: \"{{sentence}}\\nQuestion: Does this sentence make sense?\\nAnswer:\"\n", + "doc_to_target: label\n", + "doc_to_choice: [\"no\", \"yes\"]\n", + "should_decontaminate: true\n", + "doc_to_decontamination_query: sentence\n", + "metric_list:\n", + " - metric: acc\n", + "'''\n", + "with open('cola.yaml', 'w') as f:\n", + " f.write(YAML_cola_string)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "id": "XceRKCuuDtbn" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2023-11-29:11:56:33,016 INFO [utils.py:160] NumExpr defaulting to 2 threads.\n", + "2023-11-29 11:56:33.852995: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", + "2023-11-29 11:56:33.853050: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register 
cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", + "2023-11-29 11:56:33.853087: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", + "2023-11-29 11:56:35.129047: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n", + "2023-11-29:11:56:38,546 INFO [__main__.py:132] Verbosity set to INFO\n", + "2023-11-29:11:56:47,509 WARNING [__main__.py:138] --limit SHOULD ONLY BE USED FOR TESTING.REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.\n", + "2023-11-29:11:56:47,509 INFO [__main__.py:143] Including path: ./\n", + "2023-11-29:11:56:47,517 INFO [__main__.py:205] Selected Tasks: ['yes_or_no_tasks']\n", + "2023-11-29:11:56:47,520 WARNING [evaluator.py:93] generation_kwargs specified through cli, these settings will be used over set parameters in yaml tasks.\n", + "2023-11-29:11:56:47,550 INFO [huggingface.py:120] Using device 'cuda'\n", + "2023-11-29:11:57:08,743 WARNING [task.py:614] [Task: demo_cola] metric acc is defined, but aggregation is not. using default aggregation=mean\n", + "2023-11-29:11:57:08,743 WARNING [task.py:626] [Task: demo_cola] metric acc is defined, but higher_is_better is not. using default higher_is_better=True\n", + "Downloading builder script: 100% 28.8k/28.8k [00:00<00:00, 52.7MB/s]\n", + "Downloading metadata: 100% 28.7k/28.7k [00:00<00:00, 51.9MB/s]\n", + "Downloading readme: 100% 27.9k/27.9k [00:00<00:00, 48.0MB/s]\n", + "Downloading data: 100% 377k/377k [00:00<00:00, 12.0MB/s]\n", + "Generating train split: 100% 8551/8551 [00:00<00:00, 19744.58 examples/s]\n", + "Generating validation split: 100% 1043/1043 [00:00<00:00, 27057.01 examples/s]\n", + "Generating test split: 100% 1063/1063 [00:00<00:00, 22705.17 examples/s]\n", + "2023-11-29:11:57:11,698 INFO [task.py:355] Building contexts for task on rank 0...\n", + "2023-11-29:11:57:11,704 INFO [evaluator.py:319] Running loglikelihood requests\n", + "100% 20/20 [00:03<00:00, 5.15it/s]\n", + "fatal: not a git repository (or any of the parent directories): .git\n", + "hf (pretrained=EleutherAI/pythia-2.8b), gen_kwargs: (), limit: 10.0, num_fewshot: None, batch_size: 1\n", + "| Tasks |Version|Filter|n-shot|Metric|Value| |Stderr|\n", + "|---------------|-------|------|-----:|------|----:|---|-----:|\n", + "|yes_or_no_tasks|N/A |none | 0|acc | 0.7|± |0.1528|\n", + "| - demo_cola |Yaml |none | 0|acc | 0.7|± |0.1528|\n", + "\n", + "| Groups |Version|Filter|n-shot|Metric|Value| |Stderr|\n", + "|---------------|-------|------|-----:|------|----:|---|-----:|\n", + "|yes_or_no_tasks|N/A |none | 0|acc | 0.7|± |0.1528|\n", + "\n" + ] + } + ], + "source": [ + "# !accelerate launch --no_python\n", + "!lm_eval \\\n", + " --model hf \\\n", + " --model_args pretrained=EleutherAI/pythia-2.8b \\\n", + " --include_path ./ \\\n", + " --tasks yes_or_no_tasks \\\n", + " --limit 10 \\\n", + " --output output/yes_or_no_tasks/ \\\n", + " --log_samples\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "XceRKCuuDtbn" + }, + "source": [ + "## Edit Prompt Templates Quickly\n", + "\n", + "The following is a yaml made to evaluate the specific subtask of `high_school_geography` from MMLU. It uses the standard prompt where the we choose the letters from the options with most likelihood as the model's prediction." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "id": "GTFvdt9kSlBG" + }, + "outputs": [], + "source": [ + "YAML_mmlu_geo_string = '''\n", + "task: demo_mmlu_high_school_geography\n", + "dataset_path: cais/mmlu\n", + "dataset_name: high_school_geography\n", + "description: \"The following are multiple choice questions (with answers) about high school geography.\\n\\n\"\n", + "test_split: test\n", + "fewshot_split: dev\n", + "fewshot_config:\n", + " sampler: first_n\n", + "output_type: multiple_choice\n", + "doc_to_text: \"{{question.strip()}}\\nA. {{choices[0]}}\\nB. {{choices[1]}}\\nC. {{choices[2]}}\\nD. {{choices[3]}}\\nAnswer:\"\n", + "doc_to_choice: [\"A\", \"B\", \"C\", \"D\"]\n", + "doc_to_target: answer\n", + "metric_list:\n", + " - metric: acc\n", + " aggregation: mean\n", + " higher_is_better: true\n", + " - metric: acc_norm\n", + " aggregation: mean\n", + " higher_is_better: true\n", + "'''\n", + "with open('mmlu_high_school_geography.yaml', 'w') as f:\n", + " f.write(YAML_mmlu_geo_string)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "id": "jyKOfCsKb-xy" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2023-11-29:11:57:23,598 INFO [utils.py:160] NumExpr defaulting to 2 threads.\n", + "2023-11-29 11:57:24.719750: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", + "2023-11-29 11:57:24.719806: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", + "2023-11-29 11:57:24.719847: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", + "2023-11-29 11:57:26.656125: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n", + "2023-11-29:11:57:31,563 INFO [__main__.py:132] Verbosity set to INFO\n", + "2023-11-29:11:57:40,541 WARNING [__main__.py:138] --limit SHOULD ONLY BE USED FOR TESTING.REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.\n", + "2023-11-29:11:57:40,541 INFO [__main__.py:143] Including path: ./\n", + "2023-11-29:11:57:40,558 INFO [__main__.py:205] Selected Tasks: ['demo_mmlu_high_school_geography']\n", + "2023-11-29:11:57:40,559 WARNING [evaluator.py:93] generation_kwargs specified through cli, these settings will be used over set parameters in yaml tasks.\n", + "2023-11-29:11:57:40,589 INFO [huggingface.py:120] Using device 'cuda'\n", + "Downloading builder script: 100% 5.84k/5.84k [00:00<00:00, 17.7MB/s]\n", + "Downloading metadata: 100% 106k/106k [00:00<00:00, 892kB/s] \n", + "Downloading readme: 100% 39.7k/39.7k [00:00<00:00, 631kB/s]\n", + "Downloading data: 100% 166M/166M [00:01<00:00, 89.0MB/s]\n", + "Generating auxiliary_train split: 100% 99842/99842 [00:07<00:00, 12536.83 examples/s]\n", + "Generating test split: 100% 198/198 [00:00<00:00, 1439.20 examples/s]\n", + "Generating validation split: 100% 22/22 [00:00<00:00, 4181.76 examples/s]\n", + "Generating dev split: 100% 5/5 [00:00<00:00, 36.25 examples/s]\n", + "2023-11-29:11:58:09,798 INFO [task.py:355] Building contexts for task on rank 0...\n", + "2023-11-29:11:58:09,822 INFO [evaluator.py:319] Running loglikelihood requests\n", + "100% 40/40 [00:05<00:00, 
7.86it/s]\n", + "fatal: not a git repository (or any of the parent directories): .git\n", + "hf (pretrained=EleutherAI/pythia-2.8b), gen_kwargs: (), limit: 10.0, num_fewshot: None, batch_size: 1\n", + "| Tasks |Version|Filter|n-shot| Metric |Value| |Stderr|\n", + "|-------------------------------|-------|------|-----:|--------|----:|---|-----:|\n", + "|demo_mmlu_high_school_geography|Yaml |none | 0|acc | 0.3|± |0.1528|\n", + "| | |none | 0|acc_norm| 0.3|± |0.1528|\n", + "\n" + ] + } + ], + "source": [ + "# !accelerate launch --no_python\n", + "!lm_eval \\\n", + " --model hf \\\n", + " --model_args pretrained=EleutherAI/pythia-2.8b \\\n", + " --include_path ./ \\\n", + " --tasks demo_mmlu_high_school_geography \\\n", + " --limit 10 \\\n", + " --output output/mmlu_high_school_geography/ \\\n", + " --log_samples" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jyKOfCsKb-xy" + }, + "source": [ + "We could also evaluate this task in a different way. For example, instead of observing the loglikelihood of the letters, we can instead evaluate on the choices themselves as the continuation. This is done by simply changing `doc_to_choice` from a list of letters to the corresponding `choices` field from the HF dataset. We write `\"{{choices}}\"` so that the string field is interpreted as jinja string that acquires the list from the HF dataset directly.\n", + "\n", + "Another convenient feature here is since we're only modifying the `doc_to_choice` and the rest of config is the same as the task above, we can use the above configuration as a template by using `include: mmlu_high_school_geography.yaml` to load the config from that file. We'll need to add a unique task name as to not colide with the existing yaml config we're including. For this case we'll simply name this one `mmlu_high_school_geography_continuation`. `doc_to_text` is added here just for sake of clarity." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "id": "lqElwU54TaK-" + }, + "outputs": [], + "source": [ + "YAML_mmlu_geo_string = '''\n", + "include: mmlu_high_school_geography.yaml\n", + "task: demo_mmlu_high_school_geography_continuation\n", + "doc_to_text: \"{{question.strip()}}\\nA. {{choices[0]}}\\nB. {{choices[1]}}\\nC. {{choices[2]}}\\nD. 
{{choices[3]}}\\nAnswer:\"\n", + "doc_to_choice: \"{{choices}}\"\n", + "'''\n", + "with open('mmlu_high_school_geography_continuation.yaml', 'w') as f:\n", + " f.write(YAML_mmlu_geo_string)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "id": "-_CVnDirdy7j" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2023-11-29:11:58:21,284 INFO [utils.py:160] NumExpr defaulting to 2 threads.\n", + "2023-11-29 11:58:22.850159: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", + "2023-11-29 11:58:22.850219: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", + "2023-11-29 11:58:22.850254: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", + "2023-11-29 11:58:24.948103: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n", + "2023-11-29:11:58:28,460 INFO [__main__.py:132] Verbosity set to INFO\n", + "2023-11-29:11:58:37,935 WARNING [__main__.py:138] --limit SHOULD ONLY BE USED FOR TESTING.REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.\n", + "2023-11-29:11:58:37,935 INFO [__main__.py:143] Including path: ./\n", + "2023-11-29:11:58:37,969 INFO [__main__.py:205] Selected Tasks: ['demo_mmlu_high_school_geography_continuation']\n", + "2023-11-29:11:58:37,972 WARNING [evaluator.py:93] generation_kwargs specified through cli, these settings will be used over set parameters in yaml tasks.\n", + "2023-11-29:11:58:38,008 INFO [huggingface.py:120] Using device 'cuda'\n", + "2023-11-29:11:58:59,758 INFO [task.py:355] Building contexts for task on rank 0...\n", + "2023-11-29:11:58:59,777 INFO [evaluator.py:319] Running loglikelihood requests\n", + "100% 40/40 [00:02<00:00, 16.23it/s]\n", + "fatal: not a git repository (or any of the parent directories): .git\n", + "hf (pretrained=EleutherAI/pythia-2.8b), gen_kwargs: (), limit: 10.0, num_fewshot: None, batch_size: 1\n", + "| Tasks |Version|Filter|n-shot| Metric |Value| |Stderr|\n", + "|--------------------------------------------|-------|------|-----:|--------|----:|---|-----:|\n", + "|demo_mmlu_high_school_geography_continuation|Yaml |none | 0|acc | 0.1|± |0.1000|\n", + "| | |none | 0|acc_norm| 0.2|± |0.1333|\n", + "\n" + ] + } + ], + "source": [ + "# !accelerate launch --no_python\n", + "!lm_eval \\\n", + " --model hf \\\n", + " --model_args pretrained=EleutherAI/pythia-2.8b \\\n", + " --include_path ./ \\\n", + " --tasks demo_mmlu_high_school_geography_continuation \\\n", + " --limit 10 \\\n", + " --output output/mmlu_high_school_geography_continuation/ \\\n", + " --log_samples\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-_CVnDirdy7j" + }, + "source": [ + "If we take a look at the samples, we can see that it is in fact evaluating the continuation based on the choices rather than the letters." 
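+ ,"\n", + "\n", + "The per-sample log is plain JSON Lines, so it can also be inspected programmatically (a small sketch; it assumes the evaluation cell above has already written this file):\n", + "\n", + "```python\n", + "import json\n", + "\n", + "path = \"output/mmlu_high_school_geography_continuation/pretrained__EleutherAI__pythia-2.8b_demo_mmlu_high_school_geography_continuation.jsonl\"\n", + "with open(path) as f:\n", + "    samples = [json.loads(line) for line in f]\n", + "\n", + "# one JSON record per evaluated document; inspect which fields were logged\n", + "print(len(samples), list(samples[0].keys()))\n", + "```"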
+ ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "id": "duBDqC6PAdjL" + }, + "outputs": [ + { + "data": { + "application/javascript": "\n ((filepath) => {{\n if (!google.colab.kernel.accessAllowed) {{\n return;\n }}\n google.colab.files.view(filepath);\n }})(\"/content/output/mmlu_high_school_geography_continuation/pretrained__EleutherAI__pythia-2.8b_demo_mmlu_high_school_geography_continuation.jsonl\")", + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from google.colab import files\n", + "files.view(\"output/mmlu_high_school_geography_continuation/pretrained__EleutherAI__pythia-2.8b_demo_mmlu_high_school_geography_continuation.jsonl\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6p0-KPwAgK5j" + }, + "source": [ + "## Closer Look at YAML Fields\n", + "\n", + "To prepare a task, we can simply fill in a YAML config with the relevant information.\n", + "\n", + "`output_type`\n", + "The currently provided evaluation types are the following:\n", + "1. `loglikelihood`: Evaluates the loglikelihood of a continuation, conditioned on some input string.\n", + "2. `loglikelihood_rolling`: Evaluates the loglikelihood of producing a string, conditioned on the empty string. (Used for perplexity evaluations)\n", + "3. `multiple_choice`: Evaluates the loglikelihood the model assigns to each of a number of choices.\n", + "4. `greedy_until`: Model outputs greedy generation (can be configured to use beam search and other generation-related parameters)\n", + "\n", + "The core prompt revolves around 3 fields.\n", + "1. `doc_to_text`: Denotes the prompt template that will be used as input to the model.\n", + "2. `doc_to_choice`: Available choices that will be used as continuations for the model. This is used when the `output_type` is `multiple_choice`, and otherwise can be left as `None`.\n", + "3. `doc_to_target`: When `output_type` is `multiple_choice`, this can be an index that corresponds to the correct answer, or the answer string itself (which must be one of the entries in `doc_to_choice`). For other tasks, this is expected to be a string. You can fill this field with a feature name from the HF dataset so long as the resulting feature follows the conditions described.\n", + "\n", + "These three fields can be expressed as strings, column names from the source dataset, or as Jinja2 templates that can use fields from the source dataset as variables.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6p0-KPwAgK5j" + }, + "source": [ + "## What if Jinja is not Sufficient?\n", + "\n", + "There can be times when the Jinja2 templating language is not enough to build the prompt we have in mind. There are a few ways to circumvent this limitation:\n", + "\n", + "1. Use the `!function` operator for the prompt-related fields to pass a python function that takes the dataset row as input and outputs the prompt template component.\n", + "2. Perform a transformation on the dataset beforehand."
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Below, we show an example of using `!function` to create `doc_to_text` from a python function:" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "DYZ5c0JhR1lJ", + "outputId": "ca945235-fb9e-4f17-8bfa-78e7d6ec1490" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2023-11-29:11:59:08,312 INFO [utils.py:160] NumExpr defaulting to 2 threads.\n", + "2023-11-29 11:59:09.348327: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", + "2023-11-29 11:59:09.348387: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", + "2023-11-29 11:59:09.348421: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", + "2023-11-29 11:59:10.573752: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n", + "2023-11-29:11:59:14,044 INFO [__main__.py:132] Verbosity set to INFO\n", + "2023-11-29:11:59:23,654 WARNING [__main__.py:138] --limit SHOULD ONLY BE USED FOR TESTING.REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.\n", + "2023-11-29:11:59:23,654 INFO [__main__.py:143] Including path: ./\n", + "2023-11-29:11:59:23,678 INFO [__main__.py:205] Selected Tasks: ['demo_mmlu_high_school_geography_function_prompt']\n", + "2023-11-29:11:59:23,679 WARNING [evaluator.py:93] generation_kwargs specified through cli, these settings will be used over set parameters in yaml tasks.\n", + "2023-11-29:11:59:23,708 INFO [huggingface.py:120] Using device 'cuda'\n", + "2023-11-29:11:59:44,516 INFO [task.py:355] Building contexts for task on rank 0...\n", + "2023-11-29:11:59:44,524 INFO [evaluator.py:319] Running loglikelihood requests\n", + "100% 40/40 [00:02<00:00, 15.41it/s]\n", + "fatal: not a git repository (or any of the parent directories): .git\n", + "hf (pretrained=EleutherAI/pythia-2.8b), gen_kwargs: (), limit: 10.0, num_fewshot: None, batch_size: 1\n", + "| Tasks |Version|Filter|n-shot| Metric |Value| |Stderr|\n", + "|-----------------------------------------------|-------|------|-----:|--------|----:|---|-----:|\n", + "|demo_mmlu_high_school_geography_function_prompt|Yaml |none | 0|acc | 0.1|± |0.1000|\n", + "| | |none | 0|acc_norm| 0.2|± |0.1333|\n", + "\n" + ] + } + ], + "source": [ + "YAML_mmlu_geo_string = '''\n", + "include: mmlu_high_school_geography.yaml\n", + "task: demo_mmlu_high_school_geography_function_prompt\n", + "doc_to_text: !function utils.doc_to_text\n", + "doc_to_choice: \"{{choices}}\"\n", + "'''\n", + "with open('demo_mmlu_high_school_geography_function_prompt.yaml', 'w') as f:\n", + " f.write(YAML_mmlu_geo_string)\n", + "\n", + "DOC_TO_TEXT = '''\n", + "def doc_to_text(x):\n", + " question = x[\"question\"].strip()\n", + " choices = x[\"choices\"]\n", + " option_a = choices[0]\n", + " option_b = choices[1]\n", + " option_c = choices[2]\n", + " option_d = choices[3]\n", + " return f\"{question}\\\\nA. {option_a}\\\\nB. {option_b}\\\\nC. {option_c}\\\\nD. 
{option_d}\\\\nAnswer:\"\n", + "'''\n", + "with open('utils.py', 'w') as f:\n", + " f.write(DOC_TO_TEXT)\n", + "\n", + "!lm_eval \\\n", + " --model hf \\\n", + " --model_args pretrained=EleutherAI/pythia-2.8b \\\n", + " --include_path ./ \\\n", + " --tasks demo_mmlu_high_school_geography_function_prompt \\\n", + " --limit 10 \\\n", + " --output output/demo_mmlu_high_school_geography_function_prompt/ \\\n", + " --log_samples\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we'll also show how to do this via preprocessing the dataset as necessary using the `process_docs` config field:\n", + "\n", + "We will write a function that will modify each document in our evaluation dataset's split to add a field that is suitable for us to use in `doc_to_text`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "YAML_mmlu_geo_string = '''\n", + "include: mmlu_high_school_geography.yaml\n", + "task: demo_mmlu_high_school_geography_function_prompt_2\n", + "process_docs: !function utils_process_docs.process_docs\n", + "doc_to_text: \"{{input}}\"\n", + "doc_to_choice: \"{{choices}}\"\n", + "'''\n", + "with open('demo_mmlu_high_school_geography_process_docs.yaml', 'w') as f:\n", + " f.write(YAML_mmlu_geo_string)\n", + "\n", + "DOC_TO_TEXT = '''\n", + "def process_docs(dataset):\n", + " def _process_doc(x):\n", + " question = x[\"question\"].strip()\n", + " choices = x[\"choices\"]\n", + " option_a = choices[0]\n", + " option_b = choices[1]\n", + " option_c = choices[2]\n", + " option_d = choices[3]\n", + " doc[\"input\"] = f\"{question}\\\\nA. {option_a}\\\\nB. {option_b}\\\\nC. {option_c}\\\\nD. {option_d}\\\\nAnswer:\"\n", + " return out_doc\n", + "\n", + " return dataset.map(_process_doc)\n", + "'''\n", + "\n", + "with open('utils_process_docs.py', 'w') as f:\n", + " f.write(DOC_TO_TEXT)\n", + "\n", + "!lm_eval \\\n", + " --model hf \\\n", + " --model_args pretrained=EleutherAI/pythia-2.8b \\\n", + " --include_path ./ \\\n", + " --tasks demo_mmlu_high_school_geography_function_prompt_2 \\\n", + " --limit 10 \\\n", + " --output output/demo_mmlu_high_school_geography_function_prompt_2/ \\\n", + " --log_samples\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We hope that this explainer gives you a sense of what can be done with and how to work with LM-Evaluation-Harnes v0.4.0 ! \n", + "\n", + "For more information, check out our documentation pages in the `docs/` folder, and if you have questions, please raise them in GitHub issues, or in #lm-thunderdome or #release-discussion on the EleutherAI discord server." 
+ ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [ + "zAov81vTbL2K" + ], + "gpuType": "T4", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "46f521b73fd943c081c648fd873ebc0a": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "48763b6233374554ae76035c0483066f": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "4986a21eb560448fa79f4b25cde48951": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "6b2d90209ec14230b3d58a74ac9b83bf": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + 
"object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "7c5689bc13684db8a22681f41863dddd": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "a1d3a8aa016544a78e8821c8f6199e06": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_f61ed33fad754146bdd2ac9db1ba1c48", + "IPY_MODEL_bfa0af6aeff344c6845e1080a878e92e", + "IPY_MODEL_fd1ad9e0367d4004aae853b91c3a7617" + ], + "layout": "IPY_MODEL_6b2d90209ec14230b3d58a74ac9b83bf" + } + }, + "a73f357065d34d7baf0453ae4a8d75e2": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "aed3acd2f2d74003b44079c333a0698e": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": 
"DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "bfa0af6aeff344c6845e1080a878e92e": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_7c5689bc13684db8a22681f41863dddd", + "max": 5669, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_48763b6233374554ae76035c0483066f", + "value": 5669 + } + }, + "f61ed33fad754146bdd2ac9db1ba1c48": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_a73f357065d34d7baf0453ae4a8d75e2", + "placeholder": "​", + "style": "IPY_MODEL_46f521b73fd943c081c648fd873ebc0a", + "value": "Downloading builder script: 100%" + } + }, + "fd1ad9e0367d4004aae853b91c3a7617": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_4986a21eb560448fa79f4b25cde48951", + "placeholder": "​", + "style": "IPY_MODEL_aed3acd2f2d74003b44079c333a0698e", + "value": " 5.67k/5.67k [00:00<00:00, 205kB/s]" + } + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/examples/visualize-wandb.ipynb b/examples/visualize-wandb.ipynb new file mode 100644 index 000000000..d0113d559 --- /dev/null +++ b/examples/visualize-wandb.ipynb @@ -0,0 +1,170 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "fc477b96-adee-4829-a9d7-a5eb990df358", + "metadata": {}, + "source": [ + "# Visualizing Results in Weights and Biases\n", + "\n", + "With the Weights and Biases integration, you can now spend more time extracting deeper insights into your evaluation results. 
The integration is designed to streamline the process of logging and visualizing experiment results using the Weights & Biases (W&B) platform.\n", + "\n", + "The integration provides the following functionalities:\n", + "\n", + "- automatically log the evaluation results,\n", + "- log the samples as W&B Tables for easy visualization,\n", + "- log the `results.json` file as an artifact for version control,\n", + "- log the `_eval_samples.json` file if the samples are logged,\n", + "- generate a comprehensive report for analysis and visualization with all the important metrics,\n", + "- log task and CLI configs,\n", + "- and more out of the box, such as the command used to run the evaluation, GPU/CPU counts, timestamp, etc.\n", + "\n", + "The integration is super easy to use with the eval harness. Let's see how!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3851439a-bff4-41f2-bf21-1b3d8704913b", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "# Install this project if you have not already done so.\n", + "# This is all that needs to be installed to start using Weights and Biases.\n", + "\n", + "!pip -qq install -e ..[wandb]" + ] + }, + { + "cell_type": "markdown", + "id": "8507fd7e-3b99-4a92-89fa-9eaada74ba91", + "metadata": {}, + "source": [ + "# Run the Eval Harness\n", + "\n", + "Run the eval harness as usual with a `wandb_args` flag. This flag is used to provide arguments for initializing a wandb run ([wandb.init](https://docs.wandb.ai/ref/python/init)) as comma-separated string arguments.\n", + "\n", + "If the `wandb_args` flag is used, the metrics and all other goodness will be automatically logged to Weights and Biases. In the stdout, you will find a link to the W&B run page as well as a link to the generated report." + ] + }, + { + "cell_type": "markdown", + "id": "eec5866e-f01e-42f8-8803-9d77472ef991", + "metadata": {}, + "source": [ + "## Set your API Key\n", + "\n", + "Before you can use W&B, you need to authenticate your machine with an authentication key. Visit https://wandb.ai/authorize to get one." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d824d163-71a9-4313-935d-f1d56397841c", + "metadata": {}, + "outputs": [], + "source": [ + "import wandb\n", + "\n", + "wandb.login()" + ] + }, + { + "cell_type": "markdown", + "id": "124e4a34-1547-4bed-bc09-db012bacbda6", + "metadata": {}, + "source": [ + "> Note that if you are using the command line, you can simply authenticate your machine by running `wandb login` in your terminal. For more info, check out the [documentation](https://docs.wandb.ai/quickstart#2-log-in-to-wb)."
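Since the `wandb_args` string is a comma-separated list of `key=value` pairs that end up as arguments to `wandb.init`, it may help to see that correspondence spelled out. The snippet below is only a rough sketch of the mapping (the harness's actual parsing may differ), and the project, job type, and run name are placeholder values.

```python
import wandb

# Hypothetical value for the --wandb_args flag; each key corresponds to a wandb.init argument.
wandb_args = "project=lm-eval-harness-integration,job_type=eval,name=pythia-2.8b-demo"

# Roughly what the flag amounts to: split the pairs and pass them as keyword arguments.
init_kwargs = dict(pair.split("=", 1) for pair in wandb_args.split(","))
run = wandb.init(**init_kwargs)
```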
+ ] + }, + { + "cell_type": "markdown", + "id": "abc6f6b6-179a-4aff-ada9-f380fb74df6e", + "metadata": {}, + "source": [ + "## Run and log to W&B" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bd0a8130-a97b-451a-acd2-3f9885b88643", + "metadata": {}, + "outputs": [], + "source": [ + "!lm_eval \\\n", + " --model hf \\\n", + " --model_args pretrained=microsoft/phi-2,trust_remote_code=True \\\n", + " --tasks hellaswag,mmlu_abstract_algebra \\\n", + " --device cuda:0 \\\n", + " --batch_size 8 \\\n", + " --output_path output/phi-2 \\\n", + " --limit 10 \\\n", + " --wandb_args project=lm-eval-harness-integration \\\n", + " --log_samples" + ] + }, + { + "cell_type": "markdown", + "id": "e974cabdbe70b667", + "metadata": {}, + "source": [] + }, + { + "cell_type": "markdown", + "id": "5178ca9445b844e4", + "metadata": {}, + "source": [ + "W&B can also be initialized programmatically for use outside the CLI to parse and log the results." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c6a421b2cf3ddac5", + "metadata": {}, + "outputs": [], + "source": [ + "import lm_eval\n", + "from lm_eval.loggers import WandbLogger\n", + "\n", + "results = lm_eval.simple_evaluate(\n", + " model=\"hf\",\n", + " model_args=\"pretrained=microsoft/phi-2,trust_remote_code=True\",\n", + " tasks=\"hellaswag,mmlu_abstract_algebra\",\n", + " log_samples=True,\n", + ")\n", + "\n", + "wandb_logger = WandbLogger(\n", + " project=\"lm-eval-harness-integration\", job_type=\"eval\"\n", + ") # or empty if wandb.init(...) already called before\n", + "wandb_logger.post_init(results)\n", + "wandb_logger.log_eval_result()\n", + "wandb_logger.log_eval_samples(results[\"samples\"]) # if log_samples" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/visualize-zeno.ipynb b/examples/visualize-zeno.ipynb new file mode 100644 index 000000000..4ceabbf42 --- /dev/null +++ b/examples/visualize-zeno.ipynb @@ -0,0 +1,115 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Visualizing Results in Zeno\n", + "\n", + "Benchmarking your models is the first step towards making sure your model performs well.\n", + "However, looking at the data behind the benchmark, slicing the data into subsets, and comparing models on individual instances can help you even more in evaluating and quantifying the behavior of your AI system.\n", + "\n", + "All of this can be done in [Zeno](https://zenoml.com)!\n", + "Zeno is super easy to use with the eval harness, let's explore how you can easily upload and visualize your eval results.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Install this project if you did not already do that. This is all that needs to be installed for you to be able to visualize your data in Zeno!\n", + "!pip install -e ..\n", + "!pip install -e ..[zeno]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Run the Eval Harness\n", + "\n", + "To visualize the results, run the eval harness with the `log_samples` and `output_path` flags. 
We expect `output_path` to contain multiple folders that represent individual model names. You can thus run your evaluation on any number of tasks and models and upload all of the results as projects on Zeno.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!lm_eval \\\n", + " --model hf \\\n", + " --model_args pretrained=EleutherAI/gpt-neo-2.7B \\\n", + " --tasks hellaswag,wikitext \\\n", + " --batch_size 8 \\\n", + " --device mps \\\n", + " --log_samples \\\n", + " --output_path output/gpt-neo-2.7B \\\n", + " --limit 10" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Set your API Key\n", + "\n", + "This is so you can be authenticated with Zeno.\n", + "If you don't already have a Zeno account, first create an account on [Zeno Hub](https://hub.zenoml.com).\n", + "After logging in to Zeno Hub, generate your API key by clicking on your profile at the bottom left to navigate to your account page.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%env ZENO_API_KEY=YOUR_API_KEY" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Visualize Eval Results\n", + "\n", + "You can now use the `zeno_visualize` script to upload the results to Zeno.\n", + "\n", + "This will use all subfolders in `data_path` as different models and upload all tasks within these model folders to Zeno. If you run the eval harness on multiple tasks, the `project_name` will be used as a prefix and one project will be created per task.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python ../scripts/zeno_visualize.py --data_path output --project_name \"Zeno Upload Test\"" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "zeno_projects", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.11" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/ignore.txt b/ignore.txt new file mode 100644 index 000000000..de10b539b --- /dev/null +++ b/ignore.txt @@ -0,0 +1,8 @@ +ROUGE +rouge +nin +maka +mor +te +ond +extraversion diff --git a/imgs/K.png b/imgs/K.png deleted file mode 100644 index 47ddb2ec5..000000000 Binary files a/imgs/K.png and /dev/null differ diff --git a/imgs/best_practice.png b/imgs/best_practice.png deleted file mode 100644 index 811685b3b..000000000 Binary files a/imgs/best_practice.png and /dev/null differ diff --git a/imgs/llmc+.png b/imgs/llmc+.png deleted file mode 100644 index 6b41f6d90..000000000 Binary files a/imgs/llmc+.png and /dev/null differ diff --git a/imgs/llmc.png b/imgs/llmc.png deleted file mode 100644 index bed0fea79..000000000 Binary files a/imgs/llmc.png and /dev/null differ diff --git a/llmc/__main__.py b/llmc/__main__.py deleted file mode 100755 index ec60c1492..000000000 --- a/llmc/__main__.py +++ /dev/null @@ -1,268 +0,0 @@ -import argparse -import gc -import json -import os -import sys -import time - -import torch -import torch.distributed as dist -import yaml -from easydict import EasyDict -from loguru import logger -from torch.distributed import destroy_process_group, init_process_group - -from llmc.compression.quantization import * -from 
llmc.compression.sparsification import * -from llmc.compression.token_reduction import * -from llmc.data import BaseDataset -from llmc.eval.utils import eval_model, get_eval_list -from llmc.models import * -from llmc.utils import (check_config, deploy_all_modality, get_modality, - mkdirs, print_important_package_version, seed_all, - update_autoawq_quant_config, - update_lightx2v_quant_config, update_vllm_quant_config) -from llmc.utils.registry_factory import ALGO_REGISTRY, MODEL_REGISTRY - - -def main(config): - model = MODEL_REGISTRY[config.model.type](config) - - logger.info(f'model: {model}') - logger.info(f'tokenizer: {model.get_tokenizer()}') - - eval_list = get_eval_list(model, config) - eval_model(model, None, eval_list, eval_pos='pretrain') - - blockwise_opts = [] - modalities, modality_configs = get_modality(config) - - for modality, modality_config in zip(modalities, modality_configs): - model.set_modality(modality) - if not config.get('calib', False): - blockwise_opt = ALGO_REGISTRY[modality_config.method]( - model, - modality_config, - input=None, - padding_mask=None, - config=config, - ) - blockwise_opt.run_block_loop() - blockwise_opts.append(blockwise_opt) - dist.barrier() - else: - dataset = BaseDataset( - model.get_tokenizer(), config.calib, model.batch_process - ) - calib_data, padding_mask = dataset.get_calib_dataset() - model.collect_first_block_input(calib_data, padding_mask) - del calib_data - gc.collect() - torch.cuda.empty_cache() - blockwise_opt = ALGO_REGISTRY[modality_config.method]( - model, - modality_config, - model.get_first_block_input(), - model.get_padding_mask(), - config, - ) - blockwise_opt.run_block_loop() - blockwise_opts.append(blockwise_opt) - dist.barrier() - - eval_model(model, blockwise_opts, eval_list, eval_pos='transformed') - if int(os.environ['RANK']) == 0: - if 'save' in config and config.save.get('save_trans', False): - blockwise_opt.save_model(save_trans_path) - - if 'save' in config and config.save.get('save_trtllm', False): - blockwise_opt.save_model(save_trtllm_trans_path) - from llmc.utils.export_trtllm import cvt_trtllm_engine - - cvt_trtllm_engine( - save_trtllm_trans_path, - save_trtllm_engine_path, - config.save.get('trtllm_cfg'), - ) - - eval_model(model, blockwise_opts, eval_list, eval_pos='fake_quant') - eval_model(model, blockwise_opts, eval_list, eval_pos='fake_quant_wo_kv') - - if 'save' in config and config.save.get('save_fake', False): - deploy_all_modality(blockwise_opts, 'fake_quant') - blockwise_opt.save_model(save_fake_path) - - if 'save' in config: - if ( - config.save.get('save_vllm', False) - or config.save.get('save_sgl', False) - or config.save.get('save_lightllm', False) - ): - for modality_config in modality_configs: - w, a = modality_config.weight, modality_config.get('act') - - if isinstance(w.bit, str): - assert w.symmetric, 'Only symmetric quant is supported.' - assert w.bit in ['e4m3', 'e3m4'], 'Supported quant: w8a16.' - if a: - assert ( - w.symmetric and a.symmetric - ), 'Only symmetric quant is supported.' - assert ( - w.bit == a.bit - and w.bit in ['e4m3', 'e5m2'] - and a.bit in ['e4m3', 'e5m2'] - ), 'Only WA FP8 quant is supported' - else: - assert w.symmetric, 'Only symmetric quant is supported.' - assert w.bit in [4, 8], 'Supported quant: w4a16, w8a16, w8a8.' - if a: - assert a.symmetric, 'Only symmetric quant is supported.' - assert a.bit == 8, 'Supported quant: w4a16, w8a16, w8a8.' 
- - if config.save.get('save_vllm', False): - deploy_all_modality(blockwise_opts, 'vllm_quant') - elif config.save.get('save_lightllm', False): - deploy_all_modality(blockwise_opts, 'lightllm_quant') - elif config.save.get('save_sgl', False): - deploy_all_modality(blockwise_opts, 'sgl_quant') - - blockwise_opt.save_model(save_quant_path) - update_vllm_quant_config(blockwise_opt.model, config, save_quant_path) - - elif config.save.get('save_autoawq', False): - for modality_config in modality_configs: - assert ( - modality_config.weight.bit in [4] and 'act' not in modality_config - ), 'AutoAWQ supports only 4-bit weight-only quantization.' - assert ( - not modality_config.weight.symmetric - ), 'Only asymmetric quant is supported.' - - deploy_all_modality(blockwise_opts, 'autoawq_quant') - blockwise_opt.save_model(save_quant_path) - update_autoawq_quant_config(config, save_quant_path) - - elif config.save.get('save_mlcllm', False): - for modality_config in modality_configs: - assert ( - modality_config.weight.bit in [4] and 'act' not in modality_config - ), 'MlcLLM supports only 4-bit weight-only quantization.' - assert ( - not modality_config.weight.symmetric - ), 'Only asymmetric quant is supported.' - - deploy_all_modality(blockwise_opts, 'mlcllm_quant') - blockwise_opt.save_model(save_quant_path) - update_autoawq_quant_config(config, save_quant_path) - - elif config.save.get('save_lightx2v', False): - deploy_all_modality(blockwise_opts, 'lightx2v_quant') - blockwise_opt.save_model(save_quant_path) - update_lightx2v_quant_config(save_quant_path) - - if 'opencompass' in config: - assert config.save.get('save_trans', False) - cfg_path = config['opencompass']['cfg_path'] - output_path = config['opencompass']['output_path'] - eval_model_path = os.path.abspath(save_trans_path) - opencompass_cmd = ( - f'opencompass {cfg_path} -w {output_path} ' - f'--llmc_cfg {args.config} ' - f'--llmc_eval_mode quant ' - f'--llmc_model_path {eval_model_path}' - ) - logger.info(f'opencompass_cmd : {opencompass_cmd}') - os.system(opencompass_cmd) - dist.barrier() - - -if __name__ == '__main__': - logger.add(sys.stdout, level='INFO') - llmc_start_time = time.time() - parser = argparse.ArgumentParser() - parser.add_argument('--config', type=str, required=True) - parser.add_argument('--task_id', type=str, required=True) - args = parser.parse_args() - - with open(args.config, 'r') as file: - config = yaml.safe_load(file) - config = EasyDict(config) - - init_process_group(backend='nccl') - torch.cuda.set_device(int(os.environ['LOCAL_RANK'])) - - if int(os.environ['RANK']) != 0: - logger.remove() - - check_config(config) - - logger.info(f'args: {args}') - logger.info(f'config:\n{json.dumps(config, ensure_ascii=False, indent=4)}') - - print_important_package_version() - - logger.info(f'WORLD_SIZE : {int(os.environ["WORLD_SIZE"])}') - - seed_all(config.base.seed + int(os.environ['RANK'])) - - # Ensure only the main process creates directories - if int(os.environ['RANK']) == 0: - if 'save' in config: - if config.save.get('save_trans', False): - save_trans_path = os.path.join( - config.save.save_path, 'transformed_model' - ) - mkdirs(save_trans_path) - if config.save.get('save_trtllm', False): - save_trtllm_trans_path = os.path.join( - config.save.save_path, 'trtllm_transformed_model' - ) - mkdirs(save_trtllm_trans_path) - save_trtllm_engine_path = os.path.join( - config.save.save_path, 'trtllm_engine' - ) - mkdirs(save_trtllm_engine_path) - if config.save.get('save_vllm', False): - save_quant_path = os.path.join( - 
config.save.save_path, 'vllm_quant_model' - ) - mkdirs(save_quant_path) - if config.save.get('save_lightllm', False): - save_quant_path = os.path.join( - config.save.save_path, 'lightllm_quant_model' - ) - mkdirs(save_quant_path) - if config.save.get('save_sgl', False): - save_quant_path = os.path.join(config.save.save_path, 'sgl_quant_model') - mkdirs(save_quant_path) - if config.save.get('save_autoawq', False): - save_quant_path = os.path.join( - config.save.save_path, 'autoawq_quant_model' - ) - mkdirs(save_quant_path) - if config.save.get('save_mlcllm', False): - save_quant_path = os.path.join( - config.save.save_path, 'mlcllm_quant_model' - ) - mkdirs(save_quant_path) - if config.save.get('save_lightx2v', False): - save_quant_path = os.path.join( - config.save.save_path, 'lightx2v_quant_model' - ) - mkdirs(save_quant_path) - if config.save.get('save_fake', False): - save_fake_path = os.path.join(config.save.save_path, 'fake_quant_model') - mkdirs(save_fake_path) - - # Synchronize all processes after directory creation - dist.barrier() - - main(config) - - destroy_process_group() - - llmc_end_time = time.time() - llmc_duration_time = llmc_end_time - llmc_start_time - logger.info(f'llmc_duration_time: {llmc_duration_time} s') - logger.info('--- llmc finished ---') diff --git a/llmc/compression/__init__.py b/llmc/compression/__init__.py deleted file mode 100644 index fced6eb4c..000000000 --- a/llmc/compression/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .blockwise_optimization import BlockwiseOpt diff --git a/llmc/compression/blockwise_optimization.py b/llmc/compression/blockwise_optimization.py deleted file mode 100644 index 72823d1bd..000000000 --- a/llmc/compression/blockwise_optimization.py +++ /dev/null @@ -1,114 +0,0 @@ -import os -from abc import ABCMeta, abstractmethod - -import torch -from loguru import logger - - -class BlockwiseOpt(metaclass=ABCMeta): - def __init__(self, model, compress_config, input, padding_mask, config): - self.model = model - self.blocks = model.get_blocks() - self.quant_config = compress_config - self.sparsity_config = compress_config - self.input = input - self.padding_mask = padding_mask - self.data_free = False if self.input else True - self.config = config - self.block_idx = None - self.num_blocks = len(self.blocks) - if self.input: - for i in range(len(input['kwargs'])): - if 'use_cache' in input['kwargs'][i]: - input['kwargs'][i].pop('use_cache') - for i in range(len(input['kwargs'])): - if 'past_key_value' in input['kwargs'][i]: - input['kwargs'][i]['past_key_value'] = None - self.n_samples = 0 - for i in range(len(input['data'])): - self.n_samples += input['data'][i].shape[0] - - def run_block_loop(self): - for i in range(len(self.blocks)): - self.block_idx = i - logger.info( - f'\nblock index: {self.block_idx}/{len(self.blocks)} ' - f'\nblock: {self.blocks[self.block_idx]}' - ) - self.block_opt(self.blocks[self.block_idx]) - - if hasattr(self, 'save_scale') and self.save_scale: - os.makedirs(self.scale_path, exist_ok=True) - torch.save(self.act_scales, os.path.join(self.scale_path, 'scales.pth')) - if hasattr(self, 'act_shifts') and self.act_shifts: - torch.save(self.act_shifts, os.path.join(self.scale_path, 'shifts.pth')) - - if hasattr(self, 'save_clip') and self.save_clip: - os.makedirs(self.clip_path, exist_ok=True) - torch.save( - self.auto_clipper.weight_clips, - os.path.join(self.clip_path, 'clips.pth'), - ) - - def cache_input_hook(self, m, x, y, name, feat_dict): - inputs = [i.detach().cpu() for i in x] - if len(inputs) == 1: - inp = 
inputs[0] - if len(inp.shape) == 2: - inp = inp.unsqueeze(0) - feat_dict[name].append(inp) - else: - feat_dict[name].append(tuple(inputs)) - - def kv_cache_input_hook(self, attn_layer): - def hook_fn(module, args, kwargs): - kvcache = getattr(module, 'kvcache') - kwargs['past_key_value'] = kvcache - if self.config.eval.get('type', None) == 'decode_ppl': - # For eval decoding PPL (Perplexity). - past_seen_tokens = kvcache.get_seq_length() - cache_position = torch.arange( - past_seen_tokens, - past_seen_tokens + kwargs['hidden_states'].shape[1], - device=kwargs['hidden_states'].device, - ) - kwargs['cache_position'] = cache_position - position_ids = cache_position.unsqueeze(0) - kwargs['position_ids'] = position_ids - if 'position_embeddings' in kwargs: - kwargs['position_embeddings'] = self.model.rotary_emb( - kwargs['hidden_states'], position_ids - ) - if kwargs['hidden_states'].shape[1] == 1: - from .sparsification.kvsparse import ShadowKVCache - if isinstance(kvcache, ShadowKVCache): - hidden_states = kwargs['hidden_states'][:, -1, :].unsqueeze(0) - kwargs['hidden_states'] = hidden_states - bsz, q_len, _ = hidden_states.size() - tmp_query_states = \ - attn_layer.q_proj(hidden_states).view(bsz, - q_len, - -1, - attn_layer.head_dim).transpose(1, 2) - retrieval_position_ids = \ - kvcache.get_retrieval_position_ids(layer_idx=attn_layer.layer_idx, - query_states=tmp_query_states) - kwargs['retrieval_position_ids'] = retrieval_position_ids - kwargs['cos_sin_cache'] = self.cos_sin_cache - - return args, kwargs - - return hook_fn - - @abstractmethod - def block_opt(self, block): - pass - - def layer_init(self, layer): - pass - - def subset_init(self, subset): - pass - - def block_init(self, block): - pass diff --git a/llmc/compression/quantization/__init__.py b/llmc/compression/quantization/__init__.py deleted file mode 100644 index 2c08343e2..000000000 --- a/llmc/compression/quantization/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -from .adadim import AdaDim -from .awq import Awq -from .base_blockwise_quantization import BaseBlockwiseQuantization -from .dgq import DGQ -from .gptq import GPTQ -from .hqq import HQQ -from .kvquant import KiviQuantKVCache, NaiveQuantKVCache -from .llmint8 import LlmInt8 -from .module_utils import FakeQuantLinear -from .ntweak import NormTweaking -from .omniq import OmniQuant -from .osplus import OsPlus -from .quant import FloatQuantizer, IntegerQuantizer -from .quarot import Quarot -from .quik import QUIK -from .rtn import RTN -from .smoothquant import SmoothQuant -from .spqr import SpQR -from .tesseraq import TesseraQ diff --git a/llmc/compression/quantization/adadim.py b/llmc/compression/quantization/adadim.py deleted file mode 100644 index 1b5fe1e10..000000000 --- a/llmc/compression/quantization/adadim.py +++ /dev/null @@ -1,88 +0,0 @@ -import torch -from loguru import logger - -from llmc.utils.registry_factory import ALGO_REGISTRY - -from .base_blockwise_quantization import BaseBlockwiseQuantization -from .module_utils import FakeQuantLinear - - -@ALGO_REGISTRY -class AdaDim(BaseBlockwiseQuantization): - def __init__(self, model, quant_config, input, config, modality='language'): - super().__init__(model, quant_config, input, config, modality) - - def get_layer_out(self, x, layer): - with torch.no_grad(): - org_out = layer(x) - if isinstance(org_out, tuple): - org_out = org_out[0] - return org_out - - def search_dim_subset(self, layers_dict, input): - for name in layers_dict: - layer = layers_dict[name] - - loss_dict = {} - for dim in ['oc', 'ic']: - loss_mean = 
0 - - weight = layer.weight.data.clone() - - q_weight = self.wquantizer.fake_quant_weight_dynamic( - weight, {'dim': dim} - ) - - for i in range(len(input)): - input[i] = input[i].to(layer.weight.data.device) - x = input[i] - - layer.weight.data = weight - org_out = self.get_layer_out(x, layer) - - layer.weight.data = q_weight - out = self.get_layer_out(x, layer) - - loss = (org_out - out).float().pow(2).mean().item() - loss_mean += x.shape[0] * 1.0 / self.n_samples * loss - - loss_dict[dim] = loss_mean - layer.weight.data = weight - - if loss_dict['ic'] < loss_dict['oc']: - layer.register_buffer('buf_qdim', torch.tensor(0)) - logger.info(f'Suggest layer {name} use per-input channel quant') - else: - layer.register_buffer('buf_qdim', torch.tensor(1)) - logger.info(f'Suggest layer {name} use per-output channel quant') - - def block_transform(self, block, input_feat, block_kwargs): - logger.info(f'Start transform the {self.block_idx}-th block') - subsets = self.model.get_subsets_in_block(block) - for index, subset in enumerate(subsets): - logger.info(f'subset: {subset}') - layers_dict = subset['layers'] - input_name = subset['input'][0] - - self.search_dim_subset(layers_dict, input_feat[input_name]) - - self.model.replace_module_subset( - FakeQuantLinear, - block, - subset, - self.block_idx, - self.get_replacement_params( - mode='fake_quant', w_only=self.w_only, name=None - ), - ) - - logger.info(f'End transform the {self.block_idx}-th block') - - def w_qdq(self, module, wquantizer): - weight = module.weight - args = {} - args['dim'] = 'ic' if module.buf_qdim == 0 else 'oc' - - weight = self.wquantizer.fake_quant_weight_dynamic(weight, args) - - return weight diff --git a/llmc/compression/quantization/attn_utils.py b/llmc/compression/quantization/attn_utils.py deleted file mode 100644 index 8eb4282ee..000000000 --- a/llmc/compression/quantization/attn_utils.py +++ /dev/null @@ -1,402 +0,0 @@ -import math - -import torch -import torch.nn as nn - - -class LlmcMatmul(nn.Module): - def __init__(self, a1_qdq=None, a2_qdq=None): - super().__init__() - self.a1_qdq = a1_qdq - self.a2_qdq = a2_qdq - self.calib = True - - def forward(self, x1, x2): - if self.a1_qdq is not None and not self.calib: - x1 = self.a1_qdq(x1, self) - if self.a2_qdq is not None and not self.calib: - x2 = self.a2_qdq(x2, self) - out = torch.matmul(x1, x2) - return out - - def __repr__(self): - return f'LlmcMatmul(calib={self.calib})' - - -class LlmcSoftmax(nn.Module): - def __init__(self, a_qdq=None): - super().__init__() - self.a_qdq = a_qdq - self.calib = True - - def forward(self, x, dim=-1, dtype=None): - if self.a_qdq is not None and not self.calib: - x = self.a_qdq(x, self) - out = nn.functional.softmax(x, dim=dim, dtype=dtype) - return out - - def __repr__(self): - return f'LlmcSoftmax(calib={self.calib})' - - -class LlmcViTSelfAttention(nn.Module): - def __init__( - self, - query, - key, - value, - num_attention_heads, - attention_head_size, - all_head_size, - dropout, - matmul_a1_qdq, - matmul_a2_qdq, - softmax_a_qdq, - ): - super().__init__() - self.num_attention_heads = num_attention_heads - self.attention_head_size = attention_head_size - self.all_head_size = all_head_size - self.query = query - self.key = key - self.value = value - - self.dropout = dropout - - self.matmul_1 = LlmcMatmul(matmul_a1_qdq, matmul_a2_qdq) - self.matmul_2 = LlmcMatmul(matmul_a1_qdq, matmul_a2_qdq) - self.softmax = LlmcSoftmax(softmax_a_qdq) - - def transpose_for_scores(self, x): - new_x_shape = x.size()[:-1] + ( - 
self.num_attention_heads, - self.attention_head_size, - ) - x = x.view(new_x_shape) - return x.permute(0, 2, 1, 3) - - def forward(self, hidden_states, head_mask=None, output_attentions=False): - mixed_query_layer = self.query(hidden_states) - - key_layer = self.transpose_for_scores(self.key(hidden_states)) - value_layer = self.transpose_for_scores(self.value(hidden_states)) - query_layer = self.transpose_for_scores(mixed_query_layer) - - attention_scores = self.matmul_1(query_layer, key_layer.transpose(-1, -2)) - attention_scores = attention_scores / math.sqrt(self.attention_head_size) - - attention_probs = self.softmax(attention_scores, dim=-1) - attention_probs = self.dropout(attention_probs) - - if head_mask is not None: - attention_probs = attention_probs * head_mask - - context_layer = self.matmul_2(attention_probs, value_layer) - - context_layer = context_layer.permute(0, 2, 1, 3).contiguous() - new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) - context_layer = context_layer.view(new_context_layer_shape) - - outputs = ( - (context_layer, attention_probs) if output_attentions else (context_layer,) - ) - - return outputs - - @classmethod - @torch.no_grad() - def new(cls, module, matmul_a1_qdq=None, matmul_a2_qdq=None, softmax_a_qdq=None): - query, key, value = module.query, module.key, module.value - num_attention_heads = module.num_attention_heads - attention_head_size = module.attention_head_size - all_head_size = module.all_head_size - dropout = module.dropout - new_module = cls( - query, - key, - value, - num_attention_heads, - attention_head_size, - all_head_size, - dropout, - matmul_a1_qdq, - matmul_a2_qdq, - softmax_a_qdq, - ) - return new_module - - def __repr__(self): - return ( - f'LlmcViTSelfAttention(\n' - f' (query): {self.query}\n' - f' (key): {self.key}\n' - f' (value): {self.value}\n' - f' (dropout): {self.dropout}\n' - f' (matmul_1): {self.matmul_1}\n' - f' (matmul_2): {self.matmul_2}\n' - f' (softmax): {self.softmax}\n' - f')' - ) - - -class LlmcDeepseekAttention(nn.Module): - def __init__( - self, - config, - layer_idx, - attention_dropout, - hidden_size, - num_heads, - max_position_embeddings, - rope_theta, - q_lora_rank, - qk_rope_head_dim, - kv_lora_rank, - v_head_dim, - qk_nope_head_dim, - q_head_dim, - is_causal, - q_proj, - q_a_proj, - q_a_layernorm, - q_b_proj, - kv_a_proj_with_mqa, - kv_a_layernorm, - kv_b_proj, - o_proj, - rotary_emb, - softmax_scale, - matmul_a1_qdq, - matmul_a2_qdq, - softmax_a_qdq, - ): - super().__init__() - self.config = config - self.layer_idx = layer_idx - self.attention_dropout = attention_dropout - self.hidden_size = hidden_size - self.num_heads = num_heads - self.max_position_embeddings = max_position_embeddings - self.rope_theta = rope_theta - self.q_lora_rank = q_lora_rank - self.qk_rope_head_dim = qk_rope_head_dim - self.kv_lora_rank = kv_lora_rank - self.v_head_dim = v_head_dim - self.qk_nope_head_dim = qk_nope_head_dim - self.q_head_dim = q_head_dim - self.is_causal = is_causal - self.q_proj = q_proj - self.q_a_proj = q_a_proj - self.q_a_layernorm = q_a_layernorm - self.q_b_proj = q_b_proj - self.kv_a_proj_with_mqa = kv_a_proj_with_mqa - self.kv_a_layernorm = kv_a_layernorm - self.kv_b_proj = kv_b_proj - self.o_proj = o_proj - self.rotary_emb = rotary_emb - self.softmax_scale = softmax_scale - self.matmul_1 = LlmcMatmul(matmul_a1_qdq, matmul_a2_qdq) - self.matmul_2 = LlmcMatmul(matmul_a1_qdq, matmul_a2_qdq) - self.softmax = LlmcSoftmax(softmax_a_qdq) - - def _shape(self, tensor, seq_len, bsz): - 
return ( - tensor.view(bsz, seq_len, self.num_heads, self.v_head_dim) - .transpose(1, 2) - .contiguous() - ) - - def rotate_half(self, x): - """Rotates half the hidden dims of the input.""" - x1 = x[..., : x.shape[-1] // 2] - x2 = x[..., x.shape[-1] // 2:] - return torch.cat((-x2, x1), dim=-1) - - def apply_rotary_pos_emb(self, q, k, cos, sin, position_ids, unsqueeze_dim=1): - cos = cos[position_ids].unsqueeze(unsqueeze_dim) - sin = sin[position_ids].unsqueeze(unsqueeze_dim) - - b, h, s, d = q.shape - q = q.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d) - - b, h, s, d = k.shape - k = k.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d) - - q_embed = (q * cos) + (self.rotate_half(q) * sin) - k_embed = (k * cos) + (self.rotate_half(k) * sin) - return q_embed, k_embed - - @classmethod - @torch.no_grad() - def new(cls, module, matmul_a1_qdq=None, matmul_a2_qdq=None, softmax_a_qdq=None): - - config = module.config - layer_idx = module.layer_idx - - attention_dropout = module.config.attention_dropout - hidden_size = module.config.hidden_size - num_heads = module.config.num_attention_heads - - max_position_embeddings = module.config.max_position_embeddings - rope_theta = module.config.rope_theta - q_lora_rank = module.config.q_lora_rank - qk_rope_head_dim = module.config.qk_rope_head_dim - kv_lora_rank = module.config.kv_lora_rank - v_head_dim = module.config.v_head_dim - qk_nope_head_dim = module.config.qk_nope_head_dim - q_head_dim = module.q_head_dim - is_causal = module.is_causal - - if q_lora_rank is None: - q_proj = module.q_proj - q_a_proj = None - q_a_layernorm = None - q_b_proj = None - else: - q_proj = None - q_a_proj = module.q_a_proj - q_a_layernorm = module.q_a_layernorm - q_b_proj = module.q_b_proj - - kv_a_proj_with_mqa = module.kv_a_proj_with_mqa - kv_a_layernorm = module.kv_a_layernorm - kv_b_proj = module.kv_b_proj - - o_proj = module.o_proj - rotary_emb = module.rotary_emb - - softmax_scale = module.softmax_scale - - new_module = cls( - config=config, - layer_idx=layer_idx, - attention_dropout=attention_dropout, - hidden_size=hidden_size, - num_heads=num_heads, - max_position_embeddings=max_position_embeddings, - rope_theta=rope_theta, - q_lora_rank=q_lora_rank, - qk_rope_head_dim=qk_rope_head_dim, - kv_lora_rank=kv_lora_rank, - v_head_dim=v_head_dim, - qk_nope_head_dim=qk_nope_head_dim, - q_head_dim=q_head_dim, - is_causal=is_causal, - q_proj=q_proj, - q_a_proj=q_a_proj, - q_a_layernorm=q_a_layernorm, - q_b_proj=q_b_proj, - kv_a_proj_with_mqa=kv_a_proj_with_mqa, - kv_a_layernorm=kv_a_layernorm, - kv_b_proj=kv_b_proj, - o_proj=o_proj, - rotary_emb=rotary_emb, - softmax_scale=softmax_scale, - matmul_a1_qdq=matmul_a1_qdq, - matmul_a2_qdq=matmul_a2_qdq, - softmax_a_qdq=softmax_a_qdq, - ) - - return new_module - - def forward( - self, - hidden_states, - attention_mask, - position_ids, - past_key_value, - output_attentions, - use_cache, - **kwargs, - ): - bsz, q_len, _ = hidden_states.size() - if self.q_lora_rank is None: - q = self.q_proj(hidden_states) - else: - q = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states))) - - q = q.view(bsz, q_len, self.num_heads, self.q_head_dim).transpose(1, 2) - q_nope, q_pe = torch.split( - q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1 - ) - - compressed_kv = self.kv_a_proj_with_mqa(hidden_states) - compressed_kv, k_pe = torch.split( - compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1 - ) - k_pe = k_pe.view(bsz, q_len, 1, self.qk_rope_head_dim).transpose(1, 2) - kv = ( - 
self.kv_b_proj(self.kv_a_layernorm(compressed_kv)) - .view(bsz, q_len, self.num_heads, self.qk_nope_head_dim + self.v_head_dim) - .transpose(1, 2) - ) - - k_nope, value_states = torch.split( - kv, [self.qk_nope_head_dim, self.v_head_dim], dim=-1 - ) - kv_seq_len = value_states.shape[-2] - if past_key_value is not None: - kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) - cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) - - q_pe, k_pe = self.apply_rotary_pos_emb(q_pe, k_pe, cos, sin, position_ids) - - query_states = k_pe.new_empty(bsz, self.num_heads, q_len, self.q_head_dim) - query_states[:, :, :, : self.qk_nope_head_dim] = q_nope - query_states[:, :, :, self.qk_nope_head_dim:] = q_pe - - key_states = k_pe.new_empty(bsz, self.num_heads, q_len, self.q_head_dim) - key_states[:, :, :, : self.qk_nope_head_dim] = k_nope - key_states[:, :, :, self.qk_nope_head_dim:] = k_pe - if past_key_value is not None: - cache_kwargs = {'sin': sin, 'cos': cos} # Specific to RoPE models - key_states, value_states = past_key_value.update( - key_states, value_states, self.layer_idx, cache_kwargs - ) - - attn_weights = ( - self.matmul_1(query_states, key_states.transpose(2, 3)) * self.softmax_scale - ) - - if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): - raise ValueError( - f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)},' - f'but is {attn_weights.size()}' - ) - assert attention_mask is not None - if attention_mask is not None: - if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): - raise ValueError( - f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)},' - f'but is {attention_mask.size()}' - ) - attn_weights = attn_weights + attention_mask - - # upcast attention to fp32 - attn_weights = self.softmax(attn_weights, dim=-1, dtype=torch.float32).to( - query_states.dtype - ) - attn_weights = nn.functional.dropout( - attn_weights, p=self.attention_dropout, training=self.training - ) - attn_output = self.matmul_2(attn_weights, value_states) - - if attn_output.size() != (bsz, self.num_heads, q_len, self.v_head_dim): - raise ValueError( - f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.v_head_dim)},' - f' but is {attn_output.size()}' - ) - - attn_output = attn_output.transpose(1, 2).contiguous() - - attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.v_head_dim) - - attn_output = self.o_proj(attn_output) - - if not output_attentions: - attn_weights = None - - return attn_output, attn_weights, past_key_value - - -_LLMC_ATTN_MAP_ = {'Vit': LlmcViTSelfAttention, 'DeepseekV2': LlmcDeepseekAttention} diff --git a/llmc/compression/quantization/auto_clip.py b/llmc/compression/quantization/auto_clip.py deleted file mode 100755 index 62425745e..000000000 --- a/llmc/compression/quantization/auto_clip.py +++ /dev/null @@ -1,281 +0,0 @@ -import gc -import os - -import torch -import torch.distributed as dist -from loguru import logger - -from .module_utils import _LLMC_LINEAR_TYPES_, _TRANSFORMERS_LINEAR_TYPES_ -from .utils import is_fp8_supported_gpu - -if is_fp8_supported_gpu(): - from .kernel import weight_cast_to_bf16, weight_cast_to_fp8 - logger.info('Successfully imported Triton kernel.') -else: - from .quant import weight_cast_to_bf16, weight_cast_to_fp8 - logger.info( - 'Triton kernel not available: non-Hopper GPU detected.\n' - 'Using LLMC Quantizer implementation instead.' 
- ) - - -class AutoClipper: - def __init__( - self, - w_only, - wquantizer, - aquantizer, - clip_version, - clip_sym, - save_clip, - padding_mask, - ): - self.wquantizer = wquantizer - self.aquantizer = aquantizer - self.clip_version = clip_version - self.clip_sym = clip_sym - self.save_clip = save_clip - self.padding_mask = padding_mask - self.weight_clips = {} - self.w_only = w_only - self.logit = lambda x: torch.log(x / (1 - x)) - - @torch.no_grad() - def run(self, block, block_idx, input_feat, n_sample_token): - for n, m in block.named_modules(): - if isinstance(m, tuple(_LLMC_LINEAR_TYPES_ + _TRANSFORMERS_LINEAR_TYPES_)): - if m.weight.data.dtype == torch.float8_e4m3fn: - is_fp8_weight = True - m.weight.data \ - = weight_cast_to_bf16(m.weight.data, - m.weight_scale_inv.data, - self.fp8_block_size).to(torch.bfloat16) - else: - is_fp8_weight = False - m = m.cuda() - if any([_ in n for _ in ['q_', 'k_', 'query', 'key', 'Wqkv']]): - if self.clip_version == 'v2': - m.register_buffer('buf_upbound_factor', None) - m.register_buffer('buf_lowbound_factor', None) - continue - - logger.info(f'clip layer: {n}') - inputs = ( - [torch.cat(input_feat[n])] - if len(input_feat[n]) != 1 - else input_feat[n] - ) - max_val, min_val = self.auto_clip_layer( - block_idx, n, m.weight, inputs, n_sample_token=n_sample_token - ) - - dist.all_reduce(max_val, op=dist.ReduceOp.SUM) - max_val /= int(os.environ['WORLD_SIZE']) - - dist.all_reduce(min_val, op=dist.ReduceOp.SUM) - min_val /= int(os.environ['WORLD_SIZE']) - - self.apply_clip(block_idx, m, min_val, max_val, n) - if is_fp8_weight: - m.weight.data, m.weight_scale_inv.data \ - = weight_cast_to_fp8(m.weight.data, self.fp8_block_size) - - @torch.no_grad() - def auto_clip_layer( - self, - block_idx, - layer_name, - w, - inputs, - n_grid=20, - max_shrink=0.5, - n_sample_token=512, - eps=0.0, - ): - - assert w.dim() == 2 - - if self.wquantizer.granularity == 'per_group': - group_size = self.wquantizer.group_size - else: - group_size = w.shape[1] - - try: - w = w.reshape(w.shape[0], 1, -1, group_size) - except RuntimeError: - w = self.wquantizer.reshape_tensor(w) - w = w.reshape(w.shape[0], 1, -1, group_size) - oc_batch_size = 256 if w.shape[0] % 256 == 0 else 64 # prevent OOM - assert w.shape[0] % oc_batch_size == 0 - - w_all = w - best_max_val_all, best_min_val_all = [], [] - for i_b in range(w.shape[0] // oc_batch_size): - w = w_all[i_b * oc_batch_size:(i_b + 1) * oc_batch_size] - - if self.clip_sym: - org_max_val = w.abs().amax(dim=-1, keepdim=True) - else: - org_max_val = w.amax(dim=-1, keepdim=True) - - org_min_val = w.amin(dim=-1, keepdim=True) - - best_max_val = org_max_val.clone() - best_min_val = org_min_val.clone() - min_errs = torch.ones_like(org_max_val) * 1e9 - org_out_dict = {} - for i_s in range(int(max_shrink * n_grid)): - if i_s == 0: - if self.clip_version == 'v2' and not self.w_only: - i_s += eps - err_mean = 0 - for i in range(len(inputs)): - inputs[i] = inputs[i].to(w.device) - x = inputs[i] - x = x.view(-1, x.shape[-1]) - if self.padding_mask and self.padding_mask[i].numel() == x.shape[0]: - mask_tmp = self.padding_mask[i].flatten() - x = x[mask_tmp.bool()] - try: - x = x.reshape(1, x.shape[0], -1, group_size) - except RuntimeError: - x = self.wquantizer.reshape_tensor(x) - x = x.reshape(1, x.shape[0], -1, group_size) - if n_sample_token is None: - n_sample_token = min(x.shape[1], 512) - step_size = max(1, x.shape[1] // n_sample_token) - x = x[:, 0::step_size] - if i in org_out_dict: - org_out = org_out_dict[i] - else: - org_out = (x * 
w).sum(dim=-1) - org_out_dict[i] = org_out - - max_val = org_max_val * (1 - i_s / n_grid) - - if self.clip_sym: - min_val = -max_val - else: - min_val = org_min_val * (1 - i_s / n_grid) - - q_w = self.fake_quantize_weight( - w, min_val, max_val, org_min_val, org_max_val - ) - q_x = self.fake_quantize_input(block_idx, x, layer_name) - - cur_out = (q_x * q_w).sum(dim=-1) - - # co, 1, n_group, 1 - err = (cur_out - org_out).pow(2).mean(dim=1).view(min_errs.shape) - err_mean += err - - del cur_out - - err_mean /= len(inputs) - cur_best_idx = err_mean < min_errs - - min_errs[cur_best_idx] = err_mean[cur_best_idx] - best_max_val[cur_best_idx] = max_val[cur_best_idx] - best_min_val[cur_best_idx] = min_val[cur_best_idx] - - best_max_val_all.append(best_max_val) - best_min_val_all.append(best_min_val) - - best_max_val = torch.cat(best_max_val_all, dim=0) - best_min_val = torch.cat(best_min_val_all, dim=0) - - del org_out - del org_out_dict - gc.collect() - torch.cuda.empty_cache() - return best_max_val.squeeze(1), best_min_val.squeeze(1) - - @torch.no_grad() - def apply_clip(self, block_idx, layer, min_val, max_val, layer_name): - if self.clip_version == 'v1': - max_val = max_val.to(layer.weight.device) - org_shape = layer.weight.shape - try: - layer.weight.data = layer.weight.data.reshape(*max_val.shape[:2], -1) - except RuntimeError: - layer.weight.data = self.wquantizer.reshape_tensor(layer.weight.data) - layer.weight.data = layer.weight.data.reshape(*max_val.shape[:2], -1) - if self.clip_sym: - min_val = -max_val - - layer.weight.data = torch.clamp(layer.weight.data, min_val, max_val) - try: - layer.weight.data = layer.weight.data.reshape(org_shape) - except RuntimeError: - layer.weight.data = self.wquantizer.restore_tensor( - layer.weight.data, org_shape - ) - elif self.clip_version == 'v2': - up_factor, low_factor = self.get_clip_factor( - block_idx, layer, min_val, max_val, layer_name - ) - layer.register_buffer('buf_upbound_factor', up_factor) - layer.register_buffer('buf_lowbound_factor', low_factor) - if self.save_clip: - if block_idx not in self.weight_clips: - self.weight_clips[block_idx] = dict() - n = f'{layer_name}.weight_quantizer.' 
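# --- Editor's note: illustrative sketch, not part of the original patch. ----------------
# auto_clip_layer above shrinks each weight group's max range step by step, fake-quantizes
# with the candidate range, and keeps the range whose output deviates least (MSE) from the
# output of the unclipped float weights.  The standalone toy below mirrors that search with
# plain symmetric uniform quantization; the helper names (toy_fake_quant, search_clip_range)
# are hypothetical and do not exist in llmc.
import torch


def toy_fake_quant(w: torch.Tensor, max_val: torch.Tensor, n_bits: int = 4) -> torch.Tensor:
    # Symmetric uniform fake quantization with a clipped range [-max_val, max_val].
    qmax = 2 ** (n_bits - 1) - 1
    scale = max_val.clamp(min=1e-8) / qmax
    return (w / scale).round().clamp(-qmax - 1, qmax) * scale


def search_clip_range(w: torch.Tensor, x: torch.Tensor,
                      n_grid: int = 20, max_shrink: float = 0.5) -> torch.Tensor:
    # w: [out_features, group_size], x: [n_tokens, group_size]
    org_out = x @ w.t()                                  # reference output with float weights
    org_max = w.abs().amax(dim=-1, keepdim=True)
    best_err, best_max = None, org_max.clone()
    for i_s in range(int(max_shrink * n_grid)):
        max_val = org_max * (1 - i_s / n_grid)           # shrink the clip range
        err = (x @ toy_fake_quant(w, max_val).t() - org_out).pow(2).mean(dim=0)
        if best_err is None:
            best_err = err.clone()
        better = err < best_err                          # keep the per-row best range
        best_err[better] = err[better]
        best_max[better] = max_val[better]
    return best_max


if __name__ == '__main__':
    torch.manual_seed(0)
    print(search_clip_range(torch.randn(8, 128), torch.randn(256, 128)).shape)  # torch.Size([8, 1])
# --- End editor's note. ------------------------------------------------------------------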
- self.weight_clips[block_idx][n + 'upbound_factor'] = up_factor.cpu() - if low_factor is not None: - self.weight_clips[block_idx][ - n + 'lowbound_factor' - ] = low_factor.cpu() - else: - self.weight_clips[block_idx][n + 'lowbound_factor'] = None - else: - raise Exception('Not support other clip version') - - def get_clip_factor(self, block_idx, layer, min_val, max_val, layer_name): - org_min_val, org_max_val = self.wquantizer.get_minmax_range( - self.wquantizer.reshape_tensor(layer.weight.data) - ) - org_val_shape = org_max_val.shape - - if self.clip_sym: - abs_max_val = torch.max(org_max_val.abs(), org_min_val.abs()) - abs_max_val = abs_max_val.clamp(min=1e-5) - abs_max_val = abs_max_val.reshape(*max_val.shape[:2], -1) - up_factor = self.logit((max_val / abs_max_val)) - up_factor = up_factor.reshape(org_val_shape) - low_factor = None - else: - org_max_val = org_max_val.reshape(*max_val.shape[:2], -1) - - up_factor = self.logit((max_val / org_max_val)) - up_factor = up_factor.reshape(org_val_shape) - - org_min_val = org_min_val.reshape(*min_val.shape[:2], -1) - low_factor = self.logit((min_val / org_min_val)) - low_factor = low_factor.reshape(org_val_shape) - - return up_factor, low_factor - - def fake_quantize_weight(self, w, min_val, max_val, org_min_val, org_max_val): - if self.clip_version == 'v1': - cur_w = torch.clamp(w, min_val, max_val) - q_w = self.wquantizer.fake_quant_weight_dynamic(cur_w) - elif self.clip_version == 'v2': - low_factor = self.logit((min_val / org_min_val)) - up_factor = self.logit((max_val / org_max_val)) - tensor_range = self.wquantizer.get_learnable_range(w, low_factor, up_factor) - - scales, zeros, qmax, qmin = self.wquantizer.get_qparams( - tensor_range, w.device - ) - args = {'scales': scales, 'zeros': zeros, 'qmax': qmax, 'qmin': qmin} - q_w = self.wquantizer.fake_quant_weight_static(w, args) - else: - raise Exception('Not support other clip version') - return q_w - - def fake_quantize_input(self, block_idx, x, layer_name): - if not self.w_only: - q_x = self.aquantizer.fake_quant_act_dynamic(x) - else: - q_x = x - return q_x diff --git a/llmc/compression/quantization/awq.py b/llmc/compression/quantization/awq.py deleted file mode 100755 index 6fd9b87f6..000000000 --- a/llmc/compression/quantization/awq.py +++ /dev/null @@ -1,372 +0,0 @@ -import gc -import os - -import torch -import torch.distributed as dist -import torch.nn as nn -from loguru import logger - -from llmc.utils.registry_factory import ALGO_REGISTRY - -from .base_blockwise_quantization import BaseBlockwiseQuantization -from .utils import is_fp8_supported_gpu - -if is_fp8_supported_gpu(): - from .kernel import weight_cast_to_bf16, weight_cast_to_fp8 - logger.info('Successfully imported Triton kernel.') -else: - from .quant import weight_cast_to_bf16, weight_cast_to_fp8 - logger.info('Triton kernel not available (non-Hopper GPU detected). 
\ - Falling back to LLMC Quantizer implementation.') - -from .module_utils import (_LLMC_LINEAR_TYPES_, _LLMC_LN_TYPES_, - _TRANSFORMERS_LINEAR_TYPES_, - _TRANSFORMERS_LN_TYPES_, FakeQuantLinear, - LlmcFp8Linear) - - -@ALGO_REGISTRY -class Awq(BaseBlockwiseQuantization): - def __init__(self, model, quant_config, input, padding_mask, config): - super().__init__(model, quant_config, input, padding_mask, config) - special_config = self.quant_config.get('special', {}) - self.trans = special_config.get('trans', True) - self.trans_version = special_config.get('trans_version', 'v2') - self.save_scale = special_config.get('save_scale', False) - self.awq_bs = special_config.get('awq_bs', None) - self.save_mem = special_config.get('save_mem', True) - - @torch.no_grad() - def scaling_weight(self, w, scales, is_gqa): - if is_gqa: - scales_tmp = self.repeat_gqa_scales(scales) - else: - scales_tmp = scales - w.mul_(scales_tmp.view(1, -1)) - return w - - def get_weight_scale(self, layers_dict): - layers = list(layers_dict.values()) - total_scale = None - - for idx, _m in enumerate(layers): - if _m.weight.data.dtype == torch.float8_e4m3fn: - weight = weight_cast_to_bf16(_m.weight.data, - _m.weight_scale_inv.data, - self.fp8_block_size).to(torch.bfloat16) - else: - weight = _m.weight.data.clone() - org_shape = weight.shape - reshaped = self.wquantizer.reshape_tensor(weight) - abs_weights = reshaped.abs() - max_vals = abs_weights.amax(dim=1, keepdim=True) - layer_scale = abs_weights.div_(max_vals) - layer_scale = layer_scale.view(org_shape) - if total_scale is None: - total_scale = layer_scale.mean(0) - else: - total_scale.add_(layer_scale.mean(0)) - del weight, reshaped, abs_weights, max_vals, layer_scale - torch.cuda.empty_cache() - - return total_scale.div_(len(layers)) - - def get_act_scale(self, x): - if x.shape[0] == self._bs: - return x.abs().view(-1, x.shape[-1]).mean(0) - else: - batch_means = [] - b_num = x.shape[0] // self._bs - for num in range(b_num): - batch_x = x[num * self._bs:(num + 1) * self._bs] - batch_mean = batch_x.abs().view(-1, batch_x.shape[-1]).mean(0) - batch_means.append(batch_mean) - final_mean = sum(batch_means) / len(batch_means) - return final_mean - - @torch.no_grad() - def get_scales(self, prev_op, x, w_max, is_gqa, ratio): - if is_gqa: - x_tmp = prev_op(x) - w_tmp = self.get_weight_scale({'prev_op': prev_op}) - else: - x_tmp = x - w_tmp = w_max - - x_tmp = self.get_act_scale(x_tmp) - - if self.trans_version == 'v1' and not is_gqa: - scales = ( - (x_tmp.pow(ratio) / w_tmp.pow(1 - ratio)) - .clamp(min=1e-4) - .view(-1) - ) - elif self.trans_version == 'v2' or is_gqa: - scales = x_tmp.pow(ratio).clamp(min=1e-4).view(-1) - - scales = scales / (scales.max() * scales.min()).sqrt() - return scales - - def inspect_module_forward(self, x, inspect_module, kwargs): - if self._bs == x.shape[0]: - with torch.no_grad(): - out = inspect_module(x, **kwargs) - if isinstance(out, tuple): - out = out[0] - return out - else: - outs = [] - b_num = x.shape[0] // self._bs - for num in range(b_num): - _x = x[num * self._bs:(num + 1) * self._bs] - out = inspect_module(_x, **kwargs) - if isinstance(out, tuple): - out = out[0] - outs.append(out) - return torch.cat(outs, dim=0) - - @torch.no_grad() - def get_original_out(self, x, inspect_module, subset_kwargs): - with torch.no_grad(): - org_out = self.inspect_module_forward(x, inspect_module, subset_kwargs) - return org_out - - def calculate_loss(self, org_out, out): - if out.shape[0] == self._bs: - return (org_out - out).float().pow(2).mean().item() 
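# --- Editor's note: illustrative sketch, not part of the original patch. ----------------
# The Awq class below grid-searches the smoothing exponent: for each ratio it builds
# per-channel scales from activation statistics (trans_version 'v2' uses mean(|x|)^ratio,
# normalized), scales the weights, fake-quantizes them, and keeps the ratio with the
# smallest output MSE.  This toy reproduces that search for a single weight-only linear
# layer; the helper names (toy_fake_quant_weight, awq_ratio_search) are hypothetical.
import torch


def toy_fake_quant_weight(w: torch.Tensor, n_bits: int = 4) -> torch.Tensor:
    # Per-output-channel asymmetric uniform fake quantization.
    qmax = 2 ** n_bits - 1
    w_min, w_max = w.amin(dim=1, keepdim=True), w.amax(dim=1, keepdim=True)
    scale = (w_max - w_min).clamp(min=1e-8) / qmax
    zero = (-w_min / scale).round()
    return ((w / scale + zero).round().clamp(0, qmax) - zero) * scale


def awq_ratio_search(x: torch.Tensor, w: torch.Tensor, n_grid: int = 20) -> torch.Tensor:
    # x: [n_tokens, in_features], w: [out_features, in_features]
    org_out = x @ w.t()
    x_absmean = x.abs().mean(dim=0)                      # per-input-channel activation scale
    best_loss, best_scales = float('inf'), torch.ones_like(x_absmean)
    for n in range(n_grid):
        ratio = n / n_grid
        scales = x_absmean.pow(ratio).clamp(min=1e-4)
        scales = scales / (scales.max() * scales.min()).sqrt()
        # Quantize the scaled weight, then fold the scale back; equivalent to feeding x / scales.
        q_w = toy_fake_quant_weight(w * scales) / scales
        loss = (x @ q_w.t() - org_out).pow(2).mean().item()
        if loss < best_loss:
            best_loss, best_scales = loss, scales
    return best_scales


if __name__ == '__main__':
    torch.manual_seed(0)
    print(awq_ratio_search(torch.randn(128, 64), torch.randn(32, 64)).shape)  # torch.Size([64])
# --- End editor's note. ------------------------------------------------------------------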
- else: - total_loss = 0.0 - b_num = org_out.shape[0] // self._bs - for num in range(b_num): - _org_out = org_out[num * self._bs:(num + 1) * self._bs] - _out = out[num * self._bs:(num + 1) * self._bs] - single_loss = (_org_out - _out).float().pow(2).mean().item() - total_loss += single_loss - return total_loss / b_num - - def fake_quantize_weight(self, fc, scales, is_gqa, layer_name): - if fc.weight.data.dtype == torch.float8_e4m3fn: - tmp_weight_data = weight_cast_to_bf16(fc.weight.data, - fc.weight_scale_inv.data, - self.fp8_block_size).to(torch.bfloat16) - else: - tmp_weight_data = fc.weight.data - - tmp_weight_data = self.scaling_weight(tmp_weight_data, scales, is_gqa) - tmp_weight_data = self.wquantizer.fake_quant_weight_dynamic(tmp_weight_data) - - if fc.weight.data.dtype == torch.float8_e4m3fn: - fc.weight.data, fc.weight_scale_inv.data \ - = weight_cast_to_fp8(tmp_weight_data, self.fp8_block_size) - else: - fc.weight.data = tmp_weight_data - - return fc.weight - - def fake_quantize_input(self, x_tmp, layers_dict): - if self._bs == x_tmp.shape[0]: - x_tmp = self.aquantizer.fake_quant_act_dynamic(x_tmp) - else: - outs = [] - for i in range(x_tmp.shape[0]): - _x = x_tmp[i] - _x = self.aquantizer.fake_quant_act_dynamic(_x) - outs.append(_x) - x_tmp = torch.stack(outs) - return x_tmp - - @torch.no_grad() - def search_scale_subset( - self, - prev_op, - layers_dict, - input, - inspect_module, - is_gqa, - subset_kwargs - ): - - if self.awq_bs is None: - self._bs = input[0].shape[0] - else: - self._bs = self.awq_bs - - w_max = self.get_weight_scale(layers_dict) - # grid search for ratio - best_error = float('inf') - best_scales = None - n_grid = 20 - org_sd = {k: v.cpu() for k, v in inspect_module.state_dict().items()} - - org_out_dict = {} - for n in range(n_grid): - loss_mean = 0 - scales_mean = 0 - for i in range(len(input)): - input[i] = input[i].to(next(inspect_module.parameters()).device) - x = input[i] - if isinstance(subset_kwargs, list): - kwargs = subset_kwargs[i] - else: - kwargs = subset_kwargs - if i in org_out_dict: - org_out = org_out_dict[i] - else: - org_out = self.get_original_out(x, inspect_module, kwargs) - org_out_dict[i] = org_out - - ratio = n * 1 / n_grid - scales = self.get_scales(prev_op, x, w_max, is_gqa, ratio) - for layer_name in layers_dict: - fc = layers_dict[layer_name] - fc.weight = self.fake_quantize_weight(fc, scales, is_gqa, layer_name) - - x_tmp = self.scaling_input(x, scales, is_gqa) - - if not self.w_only: - x_tmp = self.fake_quantize_input(x_tmp, layers_dict) - - out = self.inspect_module_forward(x_tmp, inspect_module, kwargs) - - if self.padding_mask and org_out.shape[1] == self.padding_mask[i].shape[-1]: - org_out = org_out * self.padding_mask[i].unsqueeze(dim=-1).to(org_out.device) # noqa - out = out * self.padding_mask[i].unsqueeze(dim=-1).to(out.device) - - loss = self.calculate_loss(org_out, out) - - if len(input) == 1: - n_samples = x.shape[0] - else: - n_samples = self.n_samples - - loss_mean += x.shape[0] * 1.0 / n_samples * loss - scales_mean += x.shape[0] * 1.0 / n_samples * scales - inspect_module.load_state_dict(org_sd) - is_best = loss_mean < best_error - if is_best: - best_error = loss_mean - best_scales = scales_mean - if self.save_mem: - del org_out - del out - gc.collect() - torch.cuda.empty_cache() - - # Synchronize across ranks - best_error_tensor = torch.tensor([best_error], device='cuda') - dist.all_reduce(best_error_tensor, op=dist.ReduceOp.MIN) - global_best_error = best_error_tensor.item() - - # Identify the rank with the 
minimum loss - global_best_rank = torch.tensor([dist.get_rank() - if abs(best_error - global_best_error) < 1e-5 - else -1], - device='cuda') - dist.all_reduce(global_best_rank, op=dist.ReduceOp.MAX) - global_best_rank = global_best_rank.item() - - # Broadcast the best scales from the rank with the minimum loss to all ranks - if dist.get_rank() == global_best_rank: - dist.broadcast(best_scales, src=global_best_rank) - else: - best_scales = torch.zeros_like(best_scales, device='cuda') - dist.broadcast(best_scales, src=global_best_rank) - - del org_out_dict - gc.collect() - torch.cuda.empty_cache() - return best_scales - - @torch.no_grad() - def block_transform(self, block, input_feat, block_kwargs): - if self.trans: - super().block_transform(block, input_feat, block_kwargs) - - if self.weight_clip: - logger.info('auto_clip start') - logger.info(f'clip version: {self.clip_version}') - self.auto_clipper.run( - block, - self.block_idx, - input_feat, - n_sample_token=self.config.calib.get('seq_len', None) - ) - logger.info('auto_clip finished') - else: - logger.info('disable weight clip') - - @torch.no_grad() - def subset_transform( - self, - subset, - input_feat, - subset_kwargs, - ): - layers_dict = subset['layers'] - prev_op = subset['prev_op'] - input_name = subset['input'][0] - inspect_module = subset['inspect'] - do_trans = subset.get('do_trans', True) - if not do_trans: - logger.info('do_trans is set to False. Do not transform this subset.') - return - - if self.config['model']['type'] == 'Starcoder': - if isinstance(prev_op[0], (nn.Linear, FakeQuantLinear)): - logger.info('Do not transform this subset.') - return - - assert ( - len(prev_op) in (0, 1) - ), 'Only support single prev_op. If multi prev_ops, code need to be updated.' - - if len(prev_op) == 0 or (len(prev_op) == 1 and prev_op[0] is None): - logger.info('Cannot apply scale. Do not transform this subset.') - return - - if isinstance( - prev_op[0], - tuple( - _LLMC_LN_TYPES_ + - _TRANSFORMERS_LN_TYPES_ + - _LLMC_LINEAR_TYPES_ + - _TRANSFORMERS_LINEAR_TYPES_ - ), - ): - layers = list(layers_dict.values()) - - if ( - isinstance(prev_op[0], (nn.Linear, FakeQuantLinear, LlmcFp8Linear)) - and prev_op[0].out_features != layers[0].in_features * 3 - and prev_op[0].out_features != layers[0].in_features * 2 - and prev_op[0].out_features != layers[0].in_features - ): - - if self.has_gqa and self.do_gqa_trans: - is_gqa = True - input_keys = list(input_feat.keys()) - input_name = input_keys[input_keys.index(input_name) - 1] - else: - logger.info('Cannot apply scale. 
Do not transform this subset.') - return - else: - is_gqa = False - - scale = self.search_scale_subset( - prev_op[0], - layers_dict, - input_feat[input_name], - inspect_module, - is_gqa, - subset_kwargs - ) - - self.apply_scale(scale, prev_op, layers) - self.update_input_feat(scale, input_feat, layers_dict, is_gqa) - - if self.save_scale: - for n in layers_dict: - layer_name = f'{self.model.block_name_prefix}.{self.block_idx}.{n}' - self.act_scales[layer_name] = scale - else: - logger.info('Do not transform this subset.') diff --git a/llmc/compression/quantization/base_blockwise_quantization.py b/llmc/compression/quantization/base_blockwise_quantization.py deleted file mode 100755 index 33927f35d..000000000 --- a/llmc/compression/quantization/base_blockwise_quantization.py +++ /dev/null @@ -1,1038 +0,0 @@ -import copy -import functools -import gc -import json -import os -import re -from collections import defaultdict -from functools import partial - -import torch -import torch.distributed as dist -import torch.nn as nn -from loguru import logger - -from llmc.utils.registry_factory import KV_REGISTRY, TOKEN_REDUCTION_REGISTRY - -from ..blockwise_optimization import BlockwiseOpt -from .attn_utils import _LLMC_ATTN_MAP_ -from .auto_clip import AutoClipper -from .utils import is_fp8_supported_gpu - -if is_fp8_supported_gpu(): - from .kernel import weight_cast_to_bf16, weight_cast_to_fp8 - logger.info('Successfully imported Triton kernel.') -else: - from .quant import weight_cast_to_bf16, weight_cast_to_fp8 - logger.info( - 'Triton kernel not available: non-Hopper GPU detected.\n' - 'Using LLMC Quantizer implementation instead.' - ) - -from .hadamard_utils import apply_exact_had_to_linear, get_hadK -from .module_utils import (_LLMC_LINEAR_TYPES_, _LLMC_LN_TYPES_, - _REALQUANT_LINEAR_MAP_, _TRANSFORMERS_LINEAR_TYPES_, - _TRANSFORMERS_LN_TYPES_, EffcientFakeQuantLinear, - FakeQuantLinear, LlmcActFn, OriginFloatLinear, - RotateLinear) -from .quant import FloatQuantizer, IntegerQuantizer, Weight48IntegerQuantizer - - -class BaseBlockwiseQuantization(BlockwiseOpt): - def __init__(self, model, quant_config, input, padding_mask, config): - super().__init__(model, quant_config, input, padding_mask, config) - self.set_quant_config() - - def w_qdq(self, module, wquantizer): - args = {'lowbound_factor': None, 'upbound_factor': None} - if hasattr(module, 'buf_lowbound_factor'): - args['lowbound_factor'] = module.buf_lowbound_factor - if hasattr(module, 'buf_upbound_factor'): - args['upbound_factor'] = module.buf_upbound_factor - - if module.weight.data.dtype == torch.float8_e4m3fn: - tmp_weight \ - = weight_cast_to_bf16(module.weight, - module.weight_scale_inv, - self.fp8_block_size).to(torch.bfloat16) - else: - tmp_weight = module.weight - - tmp_weight = wquantizer.fake_quant_weight_dynamic(tmp_weight, args) - - if module.weight.data.dtype == torch.float8_e4m3fn: - tmp_weight, module.weight_scale_inv.data \ - = weight_cast_to_fp8(tmp_weight, self.fp8_block_size) - - return tmp_weight - - def w_q(self, module, wquantizer): - return wquantizer.real_quant_weight_dynamic(module.weight.data) - - def a_qdq(self, act, module, aquantizer, input_index=0): - if self.act_static: - args = { - 'scales': (getattr(module, f'buf_act_scales_{input_index}', None)), - 'zeros': (getattr(module, f'buf_act_zeros_{input_index}', None)), - 'qmax': (getattr(module, f'buf_act_qmax_{input_index}', None)), - 'qmin': (getattr(module, f'buf_act_qmin_{input_index}', None)), - } - return aquantizer.fake_quant_act_static(act, args) - 
else: - return aquantizer.fake_quant_act_dynamic(act) - - def get_replacement_params(self, mode='fake_quant', w_only=False, name=None): - params_dict = {} - if mode in ['fake_quant', 'fake_quant_wo_kv']: - params_dict['a_qdq'] = ( - partial(self.a_qdq, aquantizer=self.aquantizer) - if not w_only - else None - ) - params_dict['w_qdq'] = partial(self.w_qdq, wquantizer=self.wquantizer) - - elif mode in _REALQUANT_LINEAR_MAP_.keys(): - params_dict['w_q'] = partial(self.w_q, wquantizer=self.wquantizer) - params_dict['quant_config'] = self.quant_config - - elif mode == 'online_rotate': - had_K, K = get_hadK( - self.intermediate_size if 'down_proj' in name else self.num_heads - ) - params_dict = { - 'had_K': had_K, - 'K': K, - 'online_full_had': 'down_proj' in name, - 'online_partial_had': 'o_proj' in name, - 'had_dim': ( - None if 'down_proj' in name else self.hidden_size // self.num_heads - ), - 'fp32_had': self.fp32_had, - } - - elif mode == 'quant_attn': - params_dict = { - 'matmul_a1_qdq': partial( - self.a_qdq, aquantizer=self.aquantizer, input_index=0 - ), - 'matmul_a2_qdq': partial( - self.a_qdq, aquantizer=self.aquantizer, input_index=1 - ), - 'softmax_a_qdq': ( - partial(self.a_qdq, aquantizer=self.aquantizer) - if self.quant_softmax - else None - ), - } - - elif mode == 'quant_act_fn': - params_dict = {'a_qdq': partial(self.a_qdq, aquantizer=self.aquantizer)} - - return params_dict - - def set_quant_config(self): - if self.model.torch_dtype == torch.float8_e4m3fn: - self.fp8_block_size = self.model.fp8_block_size - - if 'ignored_layers' in self.config: - self.mixed_precision = True - self.ignored_block_ids = self.config.ignored_layers.get('block_ids', []) - self.ignored_layer_names = self.config.ignored_layers.get('layer_names', []) - self.ignored_speical_names = self.config.ignored_layers.get('speical_names', []) - else: - self.mixed_precision = False - logger.info(f'mixed_precision = {self.mixed_precision}') - - self.quant_out = self.quant_config.get('quant_out', False) - self.tp = self.quant_config.get('tp', 1) - self.quant_config['weight']['tp'] = self.tp - - # select quantizer - # weight - quant_type = self.quant_config['weight'].get('quant_type', 'int-quant') - if quant_type == 'int-quant': - if self.quant_config['weight']['bit'] == 48: - self.weight_quant_module = Weight48IntegerQuantizer - else: - self.weight_quant_module = IntegerQuantizer - elif quant_type == 'float-quant': - self.weight_quant_module = FloatQuantizer - logger.info(f'The used Weight Quant Module is {self.weight_quant_module}') - self.wquantizer = self.weight_quant_module(**self.quant_config['weight']) - - # act - if 'act' in self.quant_config: - if self.quant_config['weight']['granularity'] == 'per_block': - assert self.quant_config['act']['granularity'] == 'per_group' - assert self.quant_config['act']['group_size'] \ - == self.quant_config['weight']['block_size'] - self.w_only = False - quant_type = self.quant_config['act'].get('quant_type', 'int-quant') - if quant_type == 'int-quant': - if self.quant_config['act']['bit'] == 48: - self.act_quant_module = Weight48IntegerQuantizer - else: - self.act_quant_module = IntegerQuantizer - elif quant_type == 'float-quant': - self.act_quant_module = FloatQuantizer - self.quant_config['act']['tp'] = self.tp - self.aquantizer = self.act_quant_module(**self.quant_config['act']) - self.act_static = self.quant_config['act'].get('static', False) - if self.act_static: - assert ( - self.quant_config['act']['granularity'] == 'per_tensor' - ), 'Only support per_tensor static 
quant' - self.quant_attn = self.quant_config['act'].get('quant_attn', False) - if self.quant_attn: - assert self.config['model']['type'] in ['Vit', 'DeepseekV2'] - self.quant_softmax = self.quant_config['act'].get( - 'quant_softmax', False - ) - self.quant_act_fn = self.quant_config['act'].get('quant_act_fn', False) - else: - self.w_only = True - self.aquantizer = None - self.act_static = False - self.quant_attn = False - self.quant_softmax = False - self.quant_act_fn = False - - # set kv cache quant config - if 'kvcache' in self.quant_config: - self.quant_config['kvcache']['static'] = self.act_static - kv_special_cfg = self.quant_config['kvcache'].get('special', {}) - act_static_cfg = {} - if self.act_static: - act_static_cfg.update(self.config.calib.n_sample) - act_static_cfg.update(self.config.calib.bs) - kv_quant_type = self.quant_config['kvcache'].get('quant_type', 'int-quant') - self.kv_module = KV_REGISTRY[self.quant_config['kvcache']['method']]( - kv_quant_type, self.quant_config['kvcache'], - self.model.model_config.num_hidden_layers, **kv_special_cfg, **act_static_cfg - ) - self.quant_kvcache = True - self.model.kvcache_buffer.append(self.kv_module) - else: - self.quant_kvcache = False - - # set special quant config - special_config = self.quant_config.get('special', {}) - self.true_sequential = special_config.get('true_sequential', False) - - # set weight clip config - self.weight_clip = special_config.get('weight_clip', False) - if self.weight_clip or special_config.get('search_clip_init', False): - self.save_clip = special_config.get('save_clip', False) - if self.save_clip: - self.clip_path = special_config['clip_path'] - self.clip_version = special_config.get('clip_version', 'v1') - if self.clip_version == 'v2': - assert self.wquantizer.calib_algo == 'learnable' - clip_sym = special_config.get('clip_sym', self.wquantizer.sym) - self.auto_clipper = AutoClipper( - w_only=self.w_only, - wquantizer=self.wquantizer, - aquantizer=self.aquantizer, - clip_version=self.clip_version, - clip_sym=clip_sym, - save_clip=self.save_clip, - padding_mask=self.padding_mask, - ) - - # set transformation config - self.save_scale = special_config.get('save_scale', False) - if self.save_scale: - self.scale_path = special_config['scale_path'] - self.act_scales = {} - - # set online-rotation config - self.online_rotate = special_config.get('online_rotate', False) - if self.online_rotate: - assert ( - self.config['model']['type'] in ['Opt', 'Llama'] - ), 'Please set online_rotate=False' - self.fp32_had = special_config.get('fp32_had', False) - if self.quant_config.modality != 'video_gen': - self.set_model_config() - self.modality = self.quant_config.modality - logger.info(f'self.quant_objects : {self.quant_config.modality}') - - # set token reduction config - if 'token_reduction' in self.quant_config: - token_reduction_cfg = self.quant_config['token_reduction'] - TOKEN_REDUCTION_REGISTRY[self.quant_config['token_reduction']['method']]( - token_reduction_cfg, self.model, self.blocks - ) - - self.do_gqa_trans = special_config.get('do_gqa_trans', False) - logger.info(f'self.do_gqa_trans : {self.do_gqa_trans}') - - def set_model_config(self): - self.hidden_size = self.model.model_config.hidden_size - self.num_heads = self.model.model_config.num_attention_heads - self.head_dim = self.hidden_size // self.num_heads - if hasattr(self.model.model_config, 'intermediate_size'): - self.intermediate_size = self.model.model_config.intermediate_size - if hasattr(self.model.model_config, 'num_key_value_heads'): - 
self.num_key_value_heads = self.model.model_config.num_key_value_heads - self.num_key_value_groups = self.num_heads // self.num_key_value_heads - if self.num_key_value_groups > 1: - self.has_gqa = True - else: - self.has_gqa = False - else: - self.has_gqa = False - - def replace_rotate_linears(self, block): - for n, m in block.named_modules(): - if isinstance(m, nn.Linear) and ( - 'down_proj' in n or 'o_proj' in n or 'fc2' in n or 'out_proj' in n - ): - subset = {'layers': {n: m}} - self.model.replace_module_subset( - RotateLinear, - block, - subset, - None, - self.get_replacement_params( - mode='online_rotate', w_only=self.w_only, name=n - ), - ) - - def replace_act_fn(self, block, extra_modules): - act_fn_dict = self.model.get_act_fn_in_block(block) - layers_dict = {'layers': act_fn_dict} - self.model.replace_module_subset( - LlmcActFn, - block, - layers_dict, - self.block_idx, - self.get_replacement_params( - mode='quant_act_fn', w_only=self.w_only, name=None - ), - ) - extra_modules.update(act_fn_dict) - - def replace_attention(self, block, extra_modules): - attn_layers_dict = self.model.get_attn_in_block(block) - layers_dict = {'layers': attn_layers_dict} - attn_module = _LLMC_ATTN_MAP_[self.config['model']['type']] - self.model.replace_module_subset( - attn_module, - block, - layers_dict, - self.block_idx, - self.get_replacement_params( - mode='quant_attn', w_only=self.w_only, name=None - ), - ) - - matmul_modules = self.model.get_matmul_in_block(block) - softmax_modules = ( - self.model.get_softmax_in_block(block) if self.quant_softmax else {} - ) - extra_modules.update(matmul_modules) - extra_modules.update(softmax_modules) - - @torch.no_grad() - def collect_block_qparams(self, block): - named_linears = self.model.get_block_linears(block) - for n, m in named_linears.items(): - args = {} - if hasattr(m, 'buf_lowbound_factor'): - args['lowbound_factor'] = m.buf_lowbound_factor - if hasattr(m, 'buf_upbound_factor'): - args['upbound_factor'] = m.buf_upbound_factor - - if m.weight.data.dtype == torch.float8_e4m3fn: - tmp_weight_data = weight_cast_to_bf16(m.weight.data, - m.weight_scale_inv.data, - self.fp8_block_size).to(torch.bfloat16) - else: - tmp_weight_data = m.weight.data - - ( - tensor, - scales, - zeros, - max_int, - min_int, - ) = self.wquantizer.get_tensor_qparams(tmp_weight_data, args=args) - - m.register_buffer('buf_scales', scales.detach()) - m.register_buffer('buf_zeros', zeros.detach()) - m.register_buffer('buf_qmax', torch.tensor(max_int).to(self.dev)) - m.register_buffer('buf_qmin', torch.tensor(min_int).to(self.dev)) - - def block_forward(self, block, input_data=None): - output = [] - - if input_data is None: - input_data = self.input['data'] - - for i in range(len(input_data)): - input_data[i] = input_data[i].to(device=next(block.parameters()).device) - for k in self.input['kwargs'][i]: - if torch.is_tensor(self.input['kwargs'][i][k]): - self.input['kwargs'][i][k] = self.input['kwargs'][i][k].to( - device=next(block.parameters()).device - ) - if isinstance(self.input['kwargs'][i][k], tuple): - self.input['kwargs'][i][k] = tuple( - tmp.to(device=next(block.parameters()).device) - for tmp in self.input['kwargs'][i][k] - ) - with torch.no_grad(): - out = block(input_data[i], **self.input['kwargs'][i]) - if isinstance(out, tuple): - out = out[0] - output.append(out) - return output - - def block_opt(self, block): - - if self.quant_kvcache: - self.register_kv_cache(block) - - block = block.cuda() - named_linears = self.model.get_block_linears(block) - extra_modules = 
self.model.get_extra_modules(block) - - if self.quant_attn: - self.replace_attention(block, extra_modules) - if self.quant_act_fn: - self.replace_act_fn(block, extra_modules) - - input_feat_modules = { - k: v for d in [named_linears, extra_modules] for k, v in d.items() - } - logger.info(f'input_feat_modules: {input_feat_modules}') - input_feat = defaultdict(list) - - handles = self.register_hooks(input_feat_modules, input_feat) - - self.block_init(block) - - self.run(block, input_feat, handles) - - block = block.cpu() - del input_feat, block - gc.collect() - torch.cuda.empty_cache() - - def register_hooks(self, input_feat_modules, input_feat): - handles = [] - if not self.data_free: - for name in input_feat_modules: - handles.append( - input_feat_modules[name].register_forward_hook( - functools.partial( - self.cache_input_hook, name=name, feat_dict=input_feat - ) - ) - ) - return handles - - def run(self, block, input_feat, handles): - if not self.data_free: - if self.quant_out: - self.block_forward(block) - else: - self.input['data'] = self.block_forward(block) - - for h in handles: - h.remove() - torch.cuda.empty_cache() - - self.block_transform(block, input_feat, self.input['kwargs']) - else: - self.block_transform(block) - - if not self.data_free and self.quant_out: - self.model.replace_module_block( - FakeQuantLinear, - block, - self.block_idx, - self.get_replacement_params( - mode='fake_quant', w_only=self.w_only, name=None - ), - ) - self.set_non_linear_mode('fake_quant', block, False) - self.input['data'] = self.block_forward(block) - torch.cuda.empty_cache() - - def block_transform(self, block, input_feat, block_kwargs): - logger.info(f'Start transform the {self.block_idx}-th block') - subsets = self.model.get_subsets_in_block(block) - - if self.act_static: - self.register_non_linear_qparams(block, input_feat) - - self.set_non_linear_mode('fake_quant', block, False) - - for index, subset in enumerate(subsets): - logger.info(f'subset: {subset}') - layers_dict = subset['layers'] - input_name = subset['input'][0] - inspect_has_kwargs = subset['has_kwargs'] - if inspect_has_kwargs: - if 'sub_keys' in subset: - subset_kwargs = [] - for i in range(len(block_kwargs)): - for k, v in subset['sub_keys'].items(): - subset_kwargs.append({k: block_kwargs[i][v]}) - else: - subset_kwargs = block_kwargs - else: - subset_kwargs = {} - self.subset_transform( - subset, - input_feat, - subset_kwargs, - ) - if self.act_static: - input_tensors = copy.deepcopy(input_feat[input_name]) - self.register_act_qparams(layers_dict, input_tensors) - del input_tensors - - if self.true_sequential and index != len(subsets) - 1: - next_subset = subsets[index + 1] - input_feat_subset = self.rehook_next_subset(block, subset, next_subset) - input_feat.update(input_feat_subset) - - self.set_non_linear_mode('fake_quant', block, True) - logger.info(f'End transform the {self.block_idx}-th block') - - def rehook_next_subset(self, block, subset, next_subset): - self.subset_init(next_subset) - self.model.replace_module_subset( - FakeQuantLinear, - block, - subset, - self.block_idx, - self.get_replacement_params( - mode='fake_quant', w_only=self.w_only, name=None - ), - ) - - input_feat_subset = defaultdict(list) - input_feat_modules = next_subset['layers'] - handles = self.register_hooks(input_feat_modules, input_feat_subset) - - self.block_forward(block) - for h in handles: - h.remove() - - return input_feat_subset - - def collect_layers_weights(self, layers, tensor_parallelize_style=None): - weights = [] - for _m in 
layers: - if _m.weight.data.dtype == torch.float8_e4m3fn: - fp8_scale = _m.weight_scale_inv - tmp_weight = weight_cast_to_bf16(_m.weight, fp8_scale).to(torch.bfloat16) - weights.append(tmp_weight) - else: - weights.append(_m.weight) - return weights - - @torch.no_grad() - def register_kv_cache(self, block): - attn_layers_dict = self.model.get_attn_in_block(block) - attn_layer = attn_layers_dict[list(attn_layers_dict.keys())[0]] - setattr(attn_layer, 'kvcache', self.kv_module) - attn_layer.register_forward_pre_hook( - self.kv_cache_input_hook(attn_layer), with_kwargs=True - ) - - @torch.no_grad() - def register_non_linear_qparams(self, block, input_feat): - layer_types = [ - ('quant_attn', self.model.get_matmul_in_block), - ('quant_softmax', self.model.get_softmax_in_block, 'quant_attn'), - ('quant_act_fn', self.model.get_act_fn_in_block), - ] - - for mode, layer_func, *dependency in layer_types: - if getattr(self, mode, True) and all( - getattr(self, dep, True) for dep in dependency - ): - layers_dict = layer_func(block) - for name, layer in layers_dict.items(): - input_tensors = copy.deepcopy(input_feat[name]) - self.register_act_qparams({name: layer}, input_tensors) - del input_tensors - - @torch.no_grad() - def register_act_qparams(self, layers_dict, act_tensors): - scales_list, zeros_list, qmin_list, qmax_list = ( - self.aquantizer.get_batch_tensors_qparams(act_tensors) - ) - world_size = int(os.environ['WORLD_SIZE']) - - for i, (scales, zeros, qmin, qmax) in enumerate( - zip(scales_list, zeros_list, qmin_list, qmax_list) - ): - scales = scales.cuda() - dist.all_reduce(scales, op=dist.ReduceOp.SUM) - scales = scales / world_size - - for name, layer in layers_dict.items(): - if not isinstance( - layer, tuple(_LLMC_LINEAR_TYPES_ + _TRANSFORMERS_LINEAR_TYPES_) - ): - continue - layer.register_buffer(f'buf_act_scales_{i}', scales) - layer.register_buffer(f'buf_act_zeros_{i}', zeros.cuda()) - layer.register_buffer(f'buf_act_qmin_{i}', qmin.cuda()) - layer.register_buffer(f'buf_act_qmax_{i}', qmax.cuda()) - - @torch.no_grad() - def repeat_gqa_scales(self, scales): - scales = scales.view(1, self.num_key_value_heads, self.head_dim) - scales = torch.repeat_interleave(scales, dim=1, repeats=self.num_key_value_groups) - return scales - - @torch.no_grad() - def apply_scale(self, scales, prev_op, layers): - assert ( - len(prev_op) == 1 - ), 'Only support single prev_op. If multi prev_ops, code need to be updated.' - if isinstance( - prev_op[0], tuple(_LLMC_LINEAR_TYPES_ + _TRANSFORMERS_LINEAR_TYPES_) - ): - assert len(layers) == 1 - logger.info('apply scale between fc and fc') - self.scale_fc_fc(prev_op[0], layers[0], scales) - elif isinstance(prev_op[0], tuple(_LLMC_LN_TYPES_ + _TRANSFORMERS_LN_TYPES_)): - logger.info('apply scale between ln and fc') - self.scale_ln_fcs(prev_op[0], layers, scales) - else: - raise NotImplementedError(f'prev_op {type(prev_op[0])} not supported yet!') - - @torch.no_grad() - def apply_shift(self, shifts, prev_op, layers): - if shifts is None: - return - - assert ( - len(prev_op) == 1 - ), 'Only support single prev_op. If multi prev_ops, code need to be updated.' 
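# --- Editor's note: illustrative sketch, not part of the original patch. ----------------
# apply_scale dispatches to scale_ln_fcs / scale_fc_fc, which rely on the standard
# smoothing identity: dividing the preceding norm's elementwise gain by per-channel scales
# and multiplying the following linear's weight columns by the same scales leaves the float
# output unchanged while moving activation outliers into the weights.  Only the elementwise
# gain matters for the identity, so normalization itself is omitted in this minimal check.
import torch

torch.manual_seed(0)
x = torch.randn(4, 16)                       # [tokens, hidden]
ln_w = torch.rand(16) + 0.5                  # elementwise gain of the preceding norm
fc_w = torch.randn(32, 16)                   # following linear layer weight
scales = torch.rand(16) + 0.5                # per-channel smoothing scales

out_ref = (x * ln_w) @ fc_w.t()              # original computation

ln_w_s = ln_w / scales                       # mirrors ln.weight.div_(scales)
fc_w_s = fc_w * scales.view(1, -1)           # mirrors fc.weight.mul_(scales.view(1, -1))
out_scaled = (x * ln_w_s) @ fc_w_s.t()

print(torch.allclose(out_ref, out_scaled, atol=1e-5))   # True
# --- End editor's note. ------------------------------------------------------------------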
- if isinstance( - prev_op[0], tuple(_LLMC_LINEAR_TYPES_ + _TRANSFORMERS_LINEAR_TYPES_) - ): - assert len(layers) == 1 - self.shift_fc_fc(prev_op[0], layers[0], shifts) - elif isinstance(prev_op[0], tuple(_LLMC_LN_TYPES_ + _TRANSFORMERS_LN_TYPES_)): - self.shift_ln_fcs(prev_op[0], layers, shifts) - else: - raise NotImplementedError(f'prev_op {type(prev_op[0])} not supported yet!') - - @torch.no_grad() - def scale_fc_fc(self, fc1, fc2, scales): - scales = scales.to(fc1.weight.device) - if fc1.out_features == fc2.in_features * 3: - logger.info('fc1.out_features == fc2.in_features * 3') - num_heads = self.model.get_num_attention_heads() - fc1.weight.t_() - org_shape = fc1.weight.shape - fc1.weight.data = fc1.weight.data.reshape(org_shape[0] * num_heads, 3, -1) - value = fc1.weight.data[:, 2, :].reshape(org_shape[0], -1) - fc1.weight.data[:, 2, :] = value.div(scales.view(-1)).reshape( - fc1.weight[:, 2, :].shape - ) - fc1.weight.data = fc1.weight.data.reshape(org_shape).t_() - if hasattr(fc1, 'bias') and fc1.bias is not None: - fc1.bias.data = fc1.bias.data.reshape(num_heads, 3, -1) - - value = fc1.bias.data[:, 2, :].reshape(-1) - - fc1.bias.data[:, 2, :] = value.div(scales.view(-1)).reshape( - fc1.bias[:, 2, :].shape - ) - fc1.bias.data = fc1.bias.data.reshape(-1) - elif fc1.out_features == fc2.in_features * 2: - logger.info('fc1.out_features == fc2.in_features * 2') - fc1.weight.data[fc1.weight.data.shape[0] // 2:].div_(scales.view(-1, 1)) - if hasattr(fc1, 'bias') and fc1.bias is not None: - fc1.bias.data[fc1.bias.data.shape[0] // 2:].div_(scales.view(-1)) - elif fc1.out_features == fc2.in_features: - logger.info('fc1.out_features == fc2.in_features') - assert fc1.out_features == fc2.in_features - - if hasattr(fc1, 'bias') and fc1.bias is not None: - fc1.bias.div_(scales.view(-1)) - - if fc1.weight.data.dtype == torch.float8_e4m3fn: - fp8_scale = fc1.weight_scale_inv - tmp_weight_data = weight_cast_to_bf16(fc1.weight.data, - fp8_scale, - self.fp8_block_size).to(torch.bfloat16) - tmp_weight_data.div_(scales.view(-1, 1)) - - fc1.weight.data, fc1.weight_scale_inv.data \ - = weight_cast_to_fp8(tmp_weight_data, self.fp8_block_size) - else: - fc1.weight.div_(scales.view(-1, 1)) - - elif self.has_gqa and self.do_gqa_trans: - if hasattr(fc1, 'bias') and fc1.bias is not None: - fc1.bias.div_(scales.view(-1)) - fc1.weight.div_(scales.view(-1, 1)) - - if fc1.out_features != fc2.in_features: - logger.info('GQA scale this fc-fc.') - scales = self.repeat_gqa_scales(scales) - else: - logger.error(f'fc1.out_features: {fc1.out_features}') - logger.error(f'fc2.in_features: {fc2.in_features}') - raise Exception('Can not scale this fc-fc.') - - if fc2.weight.data.dtype == torch.float8_e4m3fn: - fp8_scale = fc2.weight_scale_inv - tmp_weight_data = weight_cast_to_bf16(fc2.weight.data, - fp8_scale, - self.fp8_block_size).to(torch.bfloat16) - tmp_weight_data.mul_(scales.view(1, -1)) - fc2.weight.data, fc2.weight_scale_inv.data \ - = weight_cast_to_fp8(tmp_weight_data, self.fp8_block_size) - else: - fc2.weight.mul_(scales.view(1, -1)) - - @torch.no_grad() - def shift_fc_fc(self, fc1, fc2, shifts): - if fc1.out_features == fc2.in_features * 3: - num_heads = self.model.get_model_config().to_dict().get('n_head', None) - if hasattr(fc1, 'bias') and fc1.bias is not None: - fc1.bias.data = fc1.bias.data.reshape(num_heads, 3, -1) - - value = fc1.bias.data[:, 2, :].reshape(-1) - fc1.bias.data[:, 2, :] = (value - shifts).reshape( - fc1.bias[:, 2, :].shape - ) - fc1.bias.data = fc1.bias.data.reshape(-1) - else: - assert 
fc1.out_features == fc2.in_features - - if hasattr(fc1, 'bias') and fc1.bias is not None: - fc1.bias.sub_(shifts) - - if hasattr(fc2, 'bias') and fc2.bias is not None: - fc2.bias.add_(fc2.weight @ shifts) - else: - if hasattr(self, 'use_shift') and self.use_shift: - del fc2.bias - fc2.register_buffer('bias', fc2.weight @ shifts) - - @torch.no_grad() - def shift_ln_fcs(self, ln, fcs, shifts): - if not isinstance(fcs, list): - fcs = [fcs] - - if self.model.has_bias(): - ln.bias.sub_(shifts) - - for fc in fcs: - if self.model.has_bias(): - fc.bias.add_(fc.weight @ shifts) - else: - if hasattr(self, 'use_shift') and self.use_shift: - del fc.bias - fc.register_buffer('bias', fc.weight @ shifts) - - for p in ln.parameters(): - assert torch.isnan(p).sum() == 0 - for fc in fcs: - for p in fc.parameters(): - assert torch.isnan(p).sum() == 0 - - @torch.no_grad() - def scale_ln_fcs(self, ln, fcs, scales): - if not isinstance(fcs, list): - fcs = [fcs] - - scales = scales.to(ln.weight.device) - scales = scales.to(ln.weight.dtype) - - ln.weight.div_(scales) - - if hasattr(ln, 'bias') and ln.bias is not None: - ln.bias.div_(scales) - - for fc in fcs: - if fc.weight.data.dtype == torch.float8_e4m3fn: - fp8_scale = fc.weight_scale_inv.data - tmp_weight_data = weight_cast_to_bf16(fc.weight.data, - fp8_scale, - self.fp8_block_size).to(torch.bfloat16) - tmp_weight_data.mul_(scales.view(1, -1)) - fc.weight.data, fc.weight_scale_inv.data \ - = weight_cast_to_fp8(tmp_weight_data, self.fp8_block_size) - else: - fc.weight.mul_(scales.view(1, -1)) - - for p in ln.parameters(): - assert torch.isnan(p).sum() == 0 - for fc in fcs: - for p in fc.parameters(): - assert torch.isnan(p).sum() == 0 - - def rotate_pre_layers(self, pre_layers, Q): - for layer in pre_layers: - if layer.weight.data.dtype == torch.float8_e4m3fn: - layer.weight.data \ - = weight_cast_to_bf16(layer.weight.data, - layer.weight_scale_inv.data, - self.fp8_block_size).to(torch.bfloat16) - dtype = layer.weight.dtype - layer.weight.data = torch.matmul(layer.weight.data.double(), Q).to(dtype) - - if hasattr(layer, 'weight_scale_inv'): - layer.weight.data, layer.weight_scale_inv.data \ - = weight_cast_to_fp8(layer.weight.data, self.fp8_block_size) - torch.cuda.empty_cache() - - def rotate_post_layers(self, post_layers, Q, exact_had=False): - for layer in post_layers: - if layer.weight.data.dtype == torch.float8_e4m3fn: - layer.weight.data \ - = weight_cast_to_bf16(layer.weight.data, - layer.weight_scale_inv.data, - self.fp8_block_size).to(torch.bfloat16) - dtype = layer.weight.dtype - layer.weight.data = torch.matmul(Q.T, layer.weight.data.double()).to(dtype) - - if exact_had and self.online_rotate: - apply_exact_had_to_linear(layer, had_dim=-1, output=False) - - if hasattr(layer, 'bias') and layer.bias is not None: - b = layer.bias.data.to(torch.float64) - layer.bias.data = torch.matmul(Q.T, b).to(dtype) - - if hasattr(layer, 'weight_scale_inv'): - layer.weight.data, layer.weight_scale_inv.data \ - = weight_cast_to_fp8(layer.weight.data, self.fp8_block_size) - torch.cuda.empty_cache() - - def rotate_embeddings(self, Q): - embeddings = self.model.get_embed_layers() - assert len(embeddings) == 1 - for layer in embeddings: - dtype = layer.weight.data.dtype - W = layer.weight.data.to(device=self.dev, dtype=torch.float64) - layer.weight.data = torch.matmul(W, Q).to(device='cpu', dtype=dtype) - - def rotate_head(self, Q): - heads = self.model.get_head_layers() - for layer in heads: - dtype = layer.weight.data.dtype - W = layer.weight.data.to(device=self.dev, 
dtype=torch.float64) - layer.weight.data = torch.matmul(W, Q).to(device='cpu', dtype=dtype) - - def fuse_ln_fcs(self, ln, fcs): - for fc in fcs: - if fc.weight.data.dtype == torch.float8_e4m3fn: - fc.weight.data \ - = weight_cast_to_bf16(fc.weight.data, - fc.weight_scale_inv.data, - self.fp8_block_size).to(torch.bfloat16) - fc_dtype = fc.weight.dtype - if hasattr(ln, 'bias') and ln.bias is not None: - W = fc.weight.data.double().clone() - fc.weight.data = (fc.weight.data.double() * ln.weight.double()).to(fc_dtype) - if hasattr(ln, 'bias') and ln.bias is not None: - if fc.bias is None: - fc.bias = torch.nn.Parameter( - torch.zeros(fc.out_features, dtype=torch.float64) - ) - fc.bias.data = fc.bias.data.double().to(device=W.device) + torch.matmul( - W, ln.bias.double() - ) - fc.bias.data = fc.bias.data.to(fc_dtype) - - if hasattr(fc, 'weight_scale_inv'): - fc.weight.data, fc.weight_scale_inv.data \ - = weight_cast_to_fp8(fc.weight.data, self.fp8_block_size) - torch.cuda.empty_cache() - - def remove_mean_from_embed(self): - embeddings = self.model.get_embed_layers() - for layer in embeddings: - W = layer.weight.data.double() - layer.weight.data = (W - W.mean(dim=-1, keepdim=True)).to( - layer.weight.data.dtype - ) - - def bake_mean_into_fc(self, fc): - fc_dtype = fc.weight.dtype - W_ = fc.weight.data.double() - fc.weight.data = W_ - W_.mean(dim=-2, keepdim=True) - fc.weight.data = fc.weight.data.to(fc_dtype) - if hasattr(fc, 'bias') and fc.bias is not None: - b_ = fc.bias.data.double() - fc.bias.data = b_ - b_.mean() - fc.bias.data = fc.bias.data.to(fc_dtype) - - @torch.no_grad() - def scaling_input(self, x, scales, is_gqa): - if is_gqa: - scales_tmp = self.repeat_gqa_scales(scales) - else: - scales_tmp = scales - if hasattr(self, '_bs') and self._bs < x.shape[0]: - x_tmp = torch.empty_like(x) - for i, batch in enumerate(x): - batch_scale = scales_tmp.view(1, -1) - x_tmp[i] = batch / batch_scale - else: - x_tmp = x / scales_tmp.view(1, -1) - return x_tmp - - @torch.no_grad() - def update_input_feat(self, scale, input_feat, layers_dict, is_gqa): - for layer_name in layers_dict: - for i in range(len(input_feat[layer_name])): - inp = input_feat[layer_name][i] - scale = scale.to(inp.device) - input_feat[layer_name][i] = self.scaling_input(inp, scale, is_gqa) - - @torch.no_grad() - def set_non_linear_mode(self, quant_format, module, mode): - assert mode in [True, False] - if quant_format != 'fake_quant': - return - for name, m in module.named_modules(): - if 'kvcache' in name: - continue - if getattr(m, 'calib', None) is not None: - m.calib = mode - - def set_no_quant_layer(self): - if self.ignored_speical_names: - assert hasattr(self.model, 'block_name_prefix'), \ - 'block_name_prefix missing in model' - ignored_block_ids = [] - for item in self.ignored_block_ids: - match = re.match(r'(\d+)-(\d+)', str(item)) - if match: - start, end = int(match.group(1)), int(match.group(2)) - ignored_block_ids.extend(range(start, end + 1)) - else: - ignored_block_ids.append(int(item)) - - for idx, block in enumerate(self.blocks): - for n, m in block.named_modules(): - if idx in ignored_block_ids and n in self.ignored_layer_names: - m.register_buffer('no_quant', torch.tensor(True)) - else: - layer_name = f'{self.model.block_name_prefix}.{idx}.{n}' - if layer_name in self.ignored_speical_names: - m.register_buffer('no_quant', torch.tensor(True)) - - @torch.no_grad() - def deploy(self, quant_format, keep_device=False): - logger.info(f'-- deploy_{quant_format}_model start --') - logger.info(f'quant_config : 
{self.quant_config}') - - module_mapping = { - 'origin_float': OriginFloatLinear, - 'fake_quant': EffcientFakeQuantLinear, - 'fake_quant_wo_kv': EffcientFakeQuantLinear, - } - module_mapping.update(_REALQUANT_LINEAR_MAP_) - - if quant_format not in module_mapping: - raise NotImplementedError( - f"Quant format '{quant_format}' is not implemented." - ) - if self.mixed_precision and 'quant' in quant_format: - self.set_no_quant_layer() - - module = module_mapping[quant_format] - if self.modality == 'vision': - self.model.replace_vision_module_all( - module, - self.get_replacement_params(mode=quant_format, w_only=self.w_only), - keep_device=keep_device, - ) - if self.modality == 'language': - self.model.replace_language_module_all( - module, - self.get_replacement_params(mode=quant_format, w_only=self.w_only), - keep_device=keep_device, - ) - if self.modality == 'video_gen': - self.model.replace_video_gen_module_all( - module, - self.get_replacement_params(mode=quant_format, w_only=self.w_only), - keep_device=keep_device, - ) - - self.set_non_linear_mode(quant_format, self.model.model, False) - - if self.quant_kvcache: - if quant_format == 'origin_float': - self.kv_module.use_org_kv = True - elif quant_format == 'fake_quant_wo_kv': - self.kv_module.use_org_kv = True - elif quant_format == 'fake_quant': - self.kv_module.use_org_kv = False - if self.act_static: - self.kv_module.calib = False - - if self.model.mm_model is not None: - logger.info(f'Now, the mm_model is: {self.model.mm_model}') - - logger.info(f'-- deploy_{quant_format}_model done --') - - @torch.no_grad() - def copy_tokenizer(self, path): - if self.model.tokenizer is not None: - self.model.tokenizer.save_pretrained(path) - logger.info('copy tokenizer done --') - else: - logger.info('no tokenizer, skip --') - - @torch.no_grad() - def contiguous_params(self): - if self.model.mm_model is not None: - for name, param in self.model.mm_model.named_parameters(): - if not param.is_contiguous(): - param.data = param.data.contiguous() - - for name, param in self.model.mm_model.named_buffers(): - if not param.is_contiguous(): - param.data = param.data.contiguous() - else: - for name, param in self.model.model.named_parameters(): - if not param.is_contiguous(): - param.data = param.data.contiguous() - - for name, param in self.model.model.named_buffers(): - if not param.is_contiguous(): - param.data = param.data.contiguous() - - @torch.no_grad() - def save_model(self, path): - if int(os.environ['RANK']) != 0: - return - self.contiguous_params() - if self.config.model.type in ['Llava', 'InternVL2', 'Mllama', 'Qwen2vl']: - self.model.vlm_model.language_model = self.model.get_model() - self.model.vlm_model.save_pretrained(path) - logger.info('save model done --') - self.copy_tokenizer(path) - elif self.config.model.type in ['Qwen2Audio']: - self.model.alm_model.language_model = self.model.get_model() - self.model.alm_model.save_pretrained(path) - logger.info('save model done --') - self.copy_tokenizer(path) - elif self.config.model.type in ['InternOmni']: - self.model.avlm_model.language_model = self.model.get_model() - self.model.avlm_model.save_pretrained(path) - logger.info('save model done --') - self.copy_tokenizer(path) - else: - self.model.get_model().save_pretrained(path) - logger.info('save model done --') - self.copy_tokenizer(path) diff --git a/llmc/compression/quantization/dgq.py b/llmc/compression/quantization/dgq.py deleted file mode 100755 index e982b0eb4..000000000 --- a/llmc/compression/quantization/dgq.py +++ /dev/null @@ -1,296 
+0,0 @@ -import gc - -import torch -import torch.nn as nn -from loguru import logger - -from llmc.utils.registry_factory import ALGO_REGISTRY - -from .base_blockwise_quantization import BaseBlockwiseQuantization -from .module_utils import _LLMC_LN_TYPES_, _TRANSFORMERS_LN_TYPES_ -from .quant import IntegerQuantizer - - -@ALGO_REGISTRY -class DGQ(BaseBlockwiseQuantization): - def __init__(self, model, quant_config, input, padding_mask, config): - super().__init__(model, quant_config, input, padding_mask, config) - self.model_dtype = next(self.model.model.parameters()).dtype - - def w_qdq(self, module, wquantizer): - scales = module.buf_scales - zeros = module.buf_zeros - scale8 = module.buf_scale8 - s = (scales * scale8.reshape(-1, 1)).reshape(-1, 1) - int_max = torch.round(127 / scales) - upper = torch.clamp(zeros + int_max, max=15.0).reshape(-1, 1) - lower = torch.clamp(zeros - int_max, min=0.0).reshape(-1, 1) - args = {} - args['scales'] = s.reshape(-1, 1) - args['zeros'] = zeros.reshape(-1, 1) - args['qmax'] = upper - args['qmin'] = lower - # logger.info(f"s.shape : {s.shape}") - # logger.info(f"scales.shape : {scales.shape}") - # logger.info(f"zeros.shape : {zeros.shape}") - # logger.info(f"upper.shape : {upper.shape}") - # logger.info(f"lower.shape : {lower.shape}") - return self.wquantizer_w4.fake_quant_weight_static(module.weight.data, args) - - def set_quant_config(self): - logger.info(f'self.quant_config : {self.quant_config}') - if 'quant_out' in self.quant_config and self.quant_config['quant_out']: - self.quant_out = True - else: - self.quant_out = False - self.quant_type = self.quant_config.get('quant_type', 'int-quant') - assert self.quant_type != 'float-quant', 'DGQ do not support Float quant now.' - # set weight quant config - self.wquantizer_w4 = IntegerQuantizer(**self.quant_config['weight']['w_1']) - perchannel_setting = { - 'bit': self.quant_config['weight']['w_1']['bit'], - 'symmetric': self.quant_config['weight']['w_1']['symmetric'], - 'granularity': 'per_channel', - } - self.wquantizer_w4_perchannel = IntegerQuantizer(**perchannel_setting) - self.wquantizer_w8 = IntegerQuantizer(**self.quant_config['weight']['w_2']) - - # set act quant config - if 'act' in self.quant_config and self.quant_config['act'] is not None: - self.w_only = False - self.aquantizer = IntegerQuantizer(**self.quant_config['act']) - else: - self.w_only = True - - @torch.no_grad() - def get_weight_scale(self, layers): - weights = self.collect_layers_weights(layers) - scale = torch.cat( - [fc.abs().max(dim=0, keepdim=True)[0] for fc in weights], dim=0 - ) - scale = scale.max(dim=0)[0].clamp(min=1e-5) - del weights - gc.collect() - torch.cuda.empty_cache() - return scale - - @torch.no_grad() - def get_act_scale(self, tensors): - scale_max = None - for x in tensors: - x = x.cuda() - x = x.abs().view(-1, x.shape[-1]) - comming_max = torch.max(x, dim=0)[0].float() - if scale_max is not None: - scale_max = torch.max(scale_max, comming_max) - else: - scale_max = comming_max - x = x.cpu() - return scale_max - - @torch.no_grad() - def search_scale_subset(self, layers, tensors): - w_max = self.get_weight_scale(layers) - x_max = self.get_act_scale(tensors) - x_max = x_max.to(dtype=w_max.dtype, device=w_max.device) - scale = (x_max.pow(0.5) / w_max.pow(0.5)).clamp(min=1e-5) - return scale - - @torch.no_grad() - def smoothquant_transform(self, prev_op, layers, tensors): - scale = self.search_scale_subset(layers, tensors) - self.apply_scale(scale, prev_op, layers) - - @torch.no_grad() - def smooth_llama_mlp(self, 
upp, downp, act_scales): - device, dtype = downp.weight.device, downp.weight.dtype - - # downp_scales = downp.weight.abs().max(dim=0)[0].cuda().float().clamp(min=1e-5) - - maxsv, inds = act_scales.sort() - basl = int(len(act_scales) * 0.005 + 1.5) # hyperparameter - baseline = maxsv[-basl] - if baseline < 1e-4: - return - scales = act_scales / baseline - scales[act_scales <= baseline] = 1.0 - # downp_m = downp_scales[inds[-basl:]] - # downp_redu = 50 * downp_scales.max() / downp_m - scales[inds[-basl:]] = scales[inds[-basl:]] - # print(scales.max()) - - act_scales /= scales - scales = scales.to(device=device, dtype=dtype) - logger.info(f'scales.device : {scales.device}') - # gatep.weight.div_(scales) - upp.weight.data.div_(scales.view(-1, 1)) - - if hasattr(upp, 'bias') and upp.bias is not None: - upp.bias.div_(scales) - downp.weight.data.mul_(scales.view(1, -1)) - - @torch.no_grad() - def search_scale_zero_layer(self, layer, input_feat): - w4_group_size = self.wquantizer_w4.group_size - weight_tmp = layer.weight.data.clone() - org_w_shape = weight_tmp.shape - device = weight_tmp.device - dtype = weight_tmp.dtype - w_out_channels, w_in_channels = weight_tmp.shape - input_feat = input_feat.to(device) - input_feat = input_feat.squeeze() - assert w_in_channels % w4_group_size == 0 - best_scales = torch.ones( - [w_out_channels, w_in_channels // w4_group_size], - dtype=self.model_dtype, - device=device, - ) - best_zeros = torch.ones( - [w_out_channels, w_in_channels // w4_group_size], - dtype=self.model_dtype, - device=device, - ) - for group_index in range(w_in_channels // w4_group_size): - inp_LxG = input_feat[ - :, group_index * w4_group_size: (group_index + 1) * w4_group_size - ] - weight_OxG = weight_tmp[ - :, group_index * w4_group_size: (group_index + 1) * w4_group_size - ] - # docformatter: off - """ - For each pair of (inp_LxG weight_OxG), - we can all consider it as per channel quantization. - Let's consider weight as - the transpose matrix of the weight in PyTorch's linear layer. - - output = input x weight - - input => [L * in] - weight => [in * out] - - Split each input channel according to groups. - input => (in/G) * [L * G] - weight => (in/G) * [G * out] - - [L * G] x [G * out] is per channel quantization. - The scale shape is [out * 1]. - input x weight is per group quantization. - The scale shape is [out * (in/G)]. - """ - # docformatter: on - org_out_LxO = inp_LxG @ (weight_OxG.t()) - grid = 20 - best_loss = torch.full( - [weight_OxG.shape[0]], float('inf'), device=device, dtype=dtype - ) - w_max = weight_OxG.amax(dim=-1, keepdim=True) - w_min = weight_OxG.amin(dim=-1, keepdim=True) - for i in range(grid): - ratio = 1.02 - (i + 1) / grid * 0.22 - weight_OxG = weight_OxG.clamp(w_min * ratio, w_max * ratio) - ( - _, - scales, - zeros, - qmax, - qmin, - ) = self.wquantizer_w4_perchannel.get_tensor_qparams(weight_OxG) - # Perchannel do not need reshape and restore tensor. 
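# --- Editor's note: illustrative sketch, not part of the original patch. ----------------
# The docstring above treats per-group weight quantization as independent per-channel
# quantization of each [L x G] x [G x out] slice: the full output is the sum of the
# per-group partial products, so scales and zeros can be searched one group at a time.
# Tiny demonstration of that decomposition (group size G divides in_features):
import torch

torch.manual_seed(0)
L, in_features, out_features, G = 8, 64, 16, 16
x = torch.randn(L, in_features)
w = torch.randn(out_features, in_features)

full = x @ w.t()
partial = sum(
    x[:, g:g + G] @ w[:, g:g + G].t()        # one [L x G] x [G x out] slice per group
    for g in range(0, in_features, G)
)
print(torch.allclose(full, partial, atol=1e-4))   # True
# --- End editor's note. ------------------------------------------------------------------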
- weight_OxG_fq = self.wquantizer_w4_perchannel.quant_dequant( - weight_OxG, scales, zeros, qmax, qmin - ) - if not self.w_only: - inp_LxG_fq = self.a_qdq(inp_LxG) - else: - inp_LxG_fq = inp_LxG - out_LxO = inp_LxG_fq @ (weight_OxG_fq.t()) - loss = (org_out_LxO - out_LxO).squeeze().pow(2).mean(dim=0).view(-1) - - best_idx = best_loss > loss - best_loss[best_idx] = loss[best_idx] - best_scales[:, group_index][best_idx] = scales.view(-1)[best_idx] - best_zeros[:, group_index][best_idx] = zeros.view(-1)[best_idx] - - grid = 80 - org_out = input_feat @ weight_tmp.t() - best_loss = torch.full( - [w_out_channels], float('inf'), device=device, dtype=dtype - ) - best_scale8 = torch.zeros( - (w_out_channels,), dtype=self.model_dtype, device=device - ) - for i in range(grid): - ratio = 1.02 - (i + 1) / grid * 0.82 - w_max = weight_tmp.abs().amax(dim=-1, keepdim=True) - ( - _, - qscales_8, - zeros, - qmax, - qmin, - ) = self.wquantizer_w8.get_tensor_qparams( - weight_tmp.clamp(-w_max * ratio, w_max * ratio) - ) - qscale = torch.round(best_scales / qscales_8).clamp(min=1.0) - int_max = torch.round(127 / qscales_8) - upper = torch.clamp(best_zeros + int_max, max=15.0).reshape(-1, 1) - lower = torch.clamp(best_zeros - int_max, min=0.0).reshape(-1, 1) - qscale_q = (qscale * qscales_8).reshape(-1, 1) - - weight_tmp_fq = self.wquantizer_w4.reshape_tensor(weight_tmp) - weight_tmp_fq = self.wquantizer_w4.quant_dequant( - weight_tmp_fq, qscale_q, best_zeros.reshape(-1, 1), upper, lower - ) - weight_tmp_fq = self.wquantizer_w4.restore_tensor( - weight_tmp_fq, org_w_shape - ) - - if not self.w_only: - input_feat_fq = self.a_qdq(input_feat) - else: - input_feat_fq = input_feat - - out = input_feat_fq @ (weight_tmp_fq.t()) - loss = (org_out - out).abs().pow(2).mean(dim=0).view(-1) - best_idx = (best_loss > loss).view(-1) - best_loss[best_idx] = loss[best_idx] - best_scale8[best_idx] = qscales_8[best_idx].view(-1) - - best_scales = torch.round(best_scales / best_scale8.view(-1, 1)).clamp(min=1.0) - return best_scales, best_zeros, best_scale8 - - @torch.no_grad() - def search_scale_zero_subset(self, layers_dict, input_feat): - logger.info(f'layers_dict : {layers_dict}') - for layer_name in layers_dict: - logger.info(f'search for : {layer_name}') - best_scales, best_zeros, best_scale8 = self.search_scale_zero_layer( - layers_dict[layer_name], input_feat - ) - # logger.info(f"best_scales : {best_scales}, {best_scales.shape}") - # logger.info(f"best_zeros : {best_zeros}, {best_zeros.shape}") - # logger.info(f"best_scale8 : {best_scale8}, {best_scale8.shape}") - layers_dict[layer_name].register_buffer('buf_scales', best_scales) - layers_dict[layer_name].register_buffer('buf_zeros', best_zeros) - layers_dict[layer_name].register_buffer('buf_scale8', best_scale8) - - @torch.no_grad() - def subset_transform( - self, - subset, - input_feat, - subset_kwargs, - ): - layers_dict = subset['layers'] - prev_op = subset['prev_op'] - input_name = subset['input'][0] - - layers = list(layers_dict.values()) - if isinstance(prev_op[0], tuple(_LLMC_LN_TYPES_ + _TRANSFORMERS_LN_TYPES_)): - self.smoothquant_transform(prev_op, layers, input_feat[input_name]) - # For llama model down proj - if 'mlp.down_proj' in layers_dict: - scale = self.search_scale_subset(layers, input_feat[input_name]) - self.smooth_llama_mlp(prev_op[0], layers[0], scale) - self.search_scale_zero_subset(layers_dict, input_feat[input_name][0]) diff --git a/llmc/compression/quantization/gptq.py b/llmc/compression/quantization/gptq.py deleted file mode 100644 index 
263d9db02..000000000 --- a/llmc/compression/quantization/gptq.py +++ /dev/null @@ -1,478 +0,0 @@ -import copy -import functools -import math -import os -from abc import ABCMeta, abstractmethod -from collections import defaultdict - -import torch -import torch.distributed as dist -import torch.nn as nn -import transformers -from loguru import logger - -from llmc.utils.registry_factory import ALGO_REGISTRY - -from .base_blockwise_quantization import BaseBlockwiseQuantization -from .module_utils import (_LLMC_LINEAR_TYPES_, _TRANSFORMERS_LINEAR_TYPES_, - FakeQuantLinear, RotateLinear) - - -@ALGO_REGISTRY -class GPTQ(BaseBlockwiseQuantization): - def __init__( - self, model, quant_config, input, padding_mask, config, modality='language' - ): - super().__init__(model, quant_config, input, padding_mask, config) - self.dev = torch.device('cuda') - self.model_dtype = next(self.model.model.parameters()).dtype - self.add_quant_config() - self.layers_cache = {} - self.collect_model_qparams() - - @torch.no_grad() - def add_quant_config(self): - self.prefix = self.model.block_name_prefix - special_config = self.quant_config['special'] - - self.true_sequential = special_config['true_sequential'] - self.static_groups = special_config['static_groups'] - self.actorder = special_config['actorder'] - self.percdamp = special_config['percdamp'] - self.blocksize = special_config['blocksize'] - - self.owq = special_config.get('owq', False) - self.chunk_num = special_config.get('chunk_num', 1) - - if self.owq: - self.n_outs = special_config['n_outs'] - self.static_groups = False - self.actorder = False - - self.need_perm = ( - self.wquantizer.granularity == 'per_group' - and not self.static_groups - and self.actorder - ) or self.owq - - def hessian_sorting(self, name): - H = self.layers_cache[name]['H'] - - if not self.owq: - if self.actorder: - self.perm = torch.argsort(torch.diag(H), descending=True) - return - - temp_mask = torch.full([self.columns], True, device=self.dev) - H_diag = torch.diag(H) - descending_ids = torch.argsort(H_diag, descending=True) - temp_mask[descending_ids[: self.n_out]] = False - - if self.actorder: - perm = torch.cat( - [descending_ids[self.n_out:], descending_ids[:self.self.n_out]] - ) - else: - perm = torch.cat( - [ - torch.arange(self.columns, device=self.dev)[temp_mask], - descending_ids[: self.n_out], - ] - ) - - self.perm = perm - - @torch.no_grad() - def block_transform(self, block, input_feat, block_kwargs): - if self.online_rotate: - self.replace_rotate_linears(block) - if self.owq and not hasattr(self, 'n_out_dict'): - named_linears = self.model.get_block_linears(block) - self.n_out_dict = {} - for i, name in enumerate(named_linears.keys()): - self.n_out_dict[name] = self.n_outs[i] - super().block_transform(block, input_feat, block_kwargs) - - @torch.no_grad() - def subset_transform( - self, - subset, - input_feat, - subset_kwargs, - ): - layers_dict = subset['layers'] - for name in layers_dict: - layer = layers_dict[name] - if not isinstance( - layer, tuple(_LLMC_LINEAR_TYPES_ + _TRANSFORMERS_LINEAR_TYPES_) - ): - continue - self.layer_transform(layer, name) - self.free(name) - - @torch.no_grad() - def layer_transform(self, layer, name): - self.initialize_qparams_and_prepare_weights(layer, name) - W, H = self.process_hessian_and_weights(layer, name) - self.update_layer_with_transformed_weights(layer, W, H, name) - - def initialize_qparams_and_prepare_weights(self, layer, name): - self.qparams = {} - self.columns = self.layers_cache[name]['columns'] - self.n_out = 
self.n_out_dict[name] if self.owq else 0 - self.n_nonout = self.columns - self.n_out - - if self.actorder or self.owq: - self.hessian_sorting(name) - - def process_hessian_and_weights(self, layer, name): - W = layer.weight.data.clone() - if isinstance(layer, nn.Conv2d): - W = W.flatten(1) - elif isinstance(layer, transformers.Conv1D): - W = W.t() - - W = W.float() - H = self.layers_cache[name]['H'] - del self.layers_cache[name]['H'] - - dead = torch.diag(H) == 0 - H[dead, dead] = 1 - W[:, dead] = 0 - - if not self.ready(): - if self.wquantizer.granularity == 'per_group': - self.groups = [] - self.search_group_qparams(layer) - else: - self.search_layer_qparams(layer) - - if self.actorder or self.owq: - W = W[:, self.perm] - H = H[self.perm][:, self.perm] - self.invperm = torch.argsort(self.perm) - - layer.register_buffer('buf_perm', self.perm) - layer.register_buffer('buf_invperm', self.invperm) - - if self.owq: - layer.register_buffer('buf_n_nonout', torch.tensor(self.n_nonout)) - if self.wquantizer.granularity == 'per_channel': - _, layer.buf_scales, layer.buf_zeros, _, _ = ( - self.wquantizer.get_tensor_qparams(W[:, : self.n_nonout]) - ) - self.qparams['scale'], self.qparams['zero'] = ( - layer.buf_scales, - layer.buf_zeros, - ) - - damp = self.percdamp * torch.mean(torch.diag(H)) - diag = torch.arange(self.columns, device=self.dev) - H[diag, diag] += damp - H = torch.linalg.cholesky(H) - H = torch.cholesky_inverse(H) - H = torch.linalg.cholesky(H, upper=True) - - return W, H - - def update_layer_with_transformed_weights(self, layer, W, H, name): - Losses = torch.zeros_like(W) - tmp = torch.zeros_like(W) - - self.weight_transform(W, H, Losses, tmp) - torch.cuda.synchronize() - logger.info(f'error {torch.sum(Losses).item()}') - - if self.actorder or self.owq: - tmp[:, self.n_nonout:] = W[:, self.n_nonout:] - tmp = tmp[:, self.invperm] - - if isinstance(layer, transformers.Conv1D): - tmp = tmp.t() - - layer.weight.data = tmp.reshape(layer.weight.shape) - - if self.wquantizer.granularity == 'per_group' and not self.static_groups: - self.update_model_qparams(layer) - - @torch.no_grad() - def weight_transform(self, W, Hinv, Losses, tmp): - for i1 in range(0, self.n_nonout, self.blocksize): - i2 = min(i1 + self.blocksize, self.n_nonout) - count = i2 - i1 - W1, Hinv1 = W[:, i1:i2].clone(), Hinv[i1:i2, i1:i2] - tmp1, Err1, Losses1 = ( - torch.zeros_like(W1), - torch.zeros_like(W1), - torch.zeros_like(W1), - ) - - for i in range(count): - w, d = W1[:, i], Hinv1[i, i] - if self.wquantizer.granularity == 'per_group': - idx = i1 + i - if not self.static_groups: - if (i1 + i) % self.wquantizer.group_size == 0: - column_tensors = W[ - :, - (i1 + i): min( - (i1 + i + self.wquantizer.group_size), - (self.columns - self.n_out), - ), - ] - self.search_column_qparams(column_tensors, idx) - else: - if self.actorder: - idx = self.perm[idx] - self.qparams = self.groups[idx // self.wquantizer.group_size] - - q = self.wquantizer.quant_dequant( - w.unsqueeze(1), - self.qparams['scale'], - self.qparams['zero'], - self.qparams['qmax'], - self.qparams['qmin'], - ).squeeze(1) - - tmp1[:, i] = w - Losses1[:, i] = ((w - q) ** 2) / (2 * d**2) - err1 = (w - q) / d - W1[:, i:] -= err1.unsqueeze(1).matmul(Hinv1[i, i:].unsqueeze(0)) - Err1[:, i] = err1 - - tmp[:, i1:i2], Losses[:, i1:i2] = tmp1, Losses1 - W[:, i2:] -= Err1.matmul(Hinv[i1:i2, i2:]) - - @torch.no_grad() - def cache_input_hook(self, m, inp, out, name, feat_dict): - if isinstance(m, tuple(_LLMC_LINEAR_TYPES_ + _TRANSFORMERS_LINEAR_TYPES_)): - 
self.add_batch(self.named_layers[name], name, inp[0].data, out.data) - if self.act_static: - super().cache_input_hook(m, inp, out, name, feat_dict) - - @torch.no_grad() - def add_batch(self, layer, name, inp, out): - world_size = int(os.environ['WORLD_SIZE']) - if len(inp.shape) == 2: - inp = inp.unsqueeze(0) - tmp = inp.shape[0] - if isinstance( - layer, (FakeQuantLinear, nn.Linear, transformers.Conv1D, RotateLinear) - ): - if isinstance(layer, RotateLinear): - # online rotate - inp = layer.rotater.rotate(inp) - if len(inp.shape) == 3: - inp = inp.reshape((-1, inp.shape[-1])) - inp = inp.t() - if isinstance(layer, nn.Conv2d): - unfold = nn.Unfold( - layer.kernel_size, - dilation=layer.dilation, - padding=layer.padding, - stride=layer.stride, - ) - inp = unfold(inp) - inp = inp.permute([1, 0, 2]) - inp = inp.flatten(1) - - assert inp.shape[1] % self.chunk_num == 0, \ - f'Error: inp.shape[1] ({inp.shape[1]}) cannot be evenly divided by chunk_num.' - chunks = torch.chunk(inp, self.chunk_num, dim=1) - - self.layers_cache[name]['H'] *= self.layers_cache[name]['nsamples'] / ( - self.layers_cache[name]['nsamples'] + tmp - ) - self.layers_cache[name]['nsamples'] += tmp - - for chunk in chunks: - chunk = math.sqrt(2 / self.layers_cache[name]['nsamples']) * chunk.float() - self.layers_cache[name]['H'] += chunk.matmul(chunk.t()) - - dist.all_reduce(self.layers_cache[name]['H'], op=dist.ReduceOp.SUM) - dist.all_reduce(torch.tensor(self.layers_cache[name]['nsamples']).cuda(), - op=dist.ReduceOp.SUM) - self.layers_cache[name]['H'] /= world_size - - @torch.no_grad() - def layer_init(self, layer, name): - W = layer.weight.data.clone() - if isinstance(layer, nn.Conv2d): - W = W.flatten(1) - if isinstance(layer, transformers.Conv1D): - W = W.t() - self.layers_cache[name]['H'] = torch.zeros( - (W.shape[1], W.shape[1]), device=self.dev - ) - self.layers_cache[name]['nsamples'] = 0 - self.layers_cache[name]['columns'] = W.shape[1] - - @torch.no_grad() - def subset_init(self, subset): - self.named_layers = subset['layers'] - for name in self.named_layers: - self.layers_cache[name] = {} - self.layer_init(self.named_layers[name], name) - - @torch.no_grad() - def block_init(self, block): - self.named_layers = self.model.get_block_linears(block) - for name in self.named_layers: - self.layers_cache[name] = {} - self.layer_init(self.named_layers[name], name) - - @torch.no_grad() - def collect_model_qparams(self): - for i in range(len(self.blocks)): - block = self.blocks[i] - block = block.cuda() - self.collect_block_qparams(block) - block = block.cpu() - - @torch.no_grad() - def split_qparams(self, qparams): - group_qparams = [] - group_num = math.ceil(self.columns / self.wquantizer.group_size) - qparams = qparams.reshape(math.ceil(qparams.shape[0] / group_num), -1) - qparams = qparams.t() - group_qparams = list(torch.split(qparams, 1, dim=0)) - for i in range(len(group_qparams)): - group_qparams[i] = group_qparams[i].reshape(-1, 1) - return group_qparams - - @torch.no_grad() - def merge_qparams(self, qparams): - if isinstance(qparams, int): - return qparams - if self.wquantizer.granularity == 'per_head': - head_size = self.rows // self.head_num - qparams = qparams.t() - qparams = qparams.repeat(head_size, 1) - qparams = qparams.t() - qparams = qparams.reshape(-1, 1) - elif self.wquantizer.granularity == 'per_group': - qparams = torch.stack(qparams, dim=1) - qparams = qparams.reshape(-1, 1) - return qparams - - @torch.no_grad() - def search_column_qparams(self, c_tensor, idx): - _, scale, zero, qmax, qmin = 
self.wquantizer.get_tensor_qparams(c_tensor) - self.qparams['scale'] = scale - self.qparams['zero'] = zero - self.qparams['qmax'] = qmax - self.qparams['qmin'] = qmin - qparams = copy.deepcopy(self.qparams) - self.groups[idx // self.wquantizer.group_size] = qparams - - @torch.no_grad() - def search_layer_qparams(self, layer): - scales = layer.buf_scales - zeros = layer.buf_zeros - scales = self.merge_qparams(scales) - if not self.wquantizer.sym: - zeros = self.merge_qparams(zeros) - self.qparams['scale'], self.qparams['zero'] = scales, zeros - self.qparams['qmax'] = layer.buf_qmax - self.qparams['qmin'] = layer.buf_qmin - - @torch.no_grad() - def search_group_qparams(self, layer): - scales = layer.buf_scales - zeros = layer.buf_zeros - self.group_scales = self.split_qparams(scales) - if not self.wquantizer.sym: - self.group_zeros = self.split_qparams(zeros) - for i in range(len(self.group_scales)): - qparams = {} - qparams['scale'] = self.group_scales[i] - if not self.wquantizer.sym: - qparams['zero'] = self.group_zeros[i] - else: - qparams['zero'] = torch.tensor(0.0) - qparams['qmax'] = layer.buf_qmax - qparams['qmin'] = layer.buf_qmin - self.groups.append(qparams) - - @torch.no_grad() - def update_model_qparams(self, layer): - _scales = [] - _zeros = [] - for g in self.groups: - _scales.append(g['scale']) - _zeros.append(g['zero']) - scales = self.merge_qparams(_scales) - layer.buf_scales = copy.deepcopy(scales) - - if not self.wquantizer.sym: - zeros = self.merge_qparams(_zeros) - layer.buf_zeros = copy.deepcopy(zeros) - - @torch.no_grad() - def w_q(self, module, wquantizer): - weight = module.weight.data - args = {} - args['scales'] = module.buf_scales - args['zeros'] = module.buf_zeros - args['qmax'] = module.buf_qmax - args['qmin'] = module.buf_qmin - args['scales'] = args['scales'].to(self.model_dtype) - - weight, scales, zeros = wquantizer.real_quant_weight_static(weight, args) - return weight, scales, zeros - - @torch.no_grad() - def w_qdq(self, module, wquantizer): - weight = module.weight - if self.need_perm: - perm = module.buf_perm - weight = module.weight[:, perm] - - args = {} - args['scales'] = module.buf_scales - if hasattr(module, 'buf_zeros'): - args['zeros'] = module.buf_zeros - else: - args['zeros'] = None - args['qmax'] = module.buf_qmax - args['qmin'] = module.buf_qmin - - if self.owq: - fp_weight = weight[:, module.buf_n_nonout:] - - weight = wquantizer.fake_quant_weight_static(weight, args).to(self.model_dtype) - - if self.owq: - weight[:, module.buf_n_nonout:] = fp_weight.to(self.model_dtype) - - if self.need_perm: - invperm = module.buf_invperm - weight = weight[:, invperm] - - return weight - - @torch.no_grad() - def deploy(self, quant_format): - if quant_format not in ['fake_quant', 'origin_float']: - assert not self.need_perm - super().deploy(quant_format) - self.model.convert_dtype(self.model_dtype) - - @torch.no_grad() - def save_model(self, path): - self.model.convert_dtype(self.model_dtype) - super().save_model(path) - - @torch.no_grad() - def free(self, name): - self.H = None - self.Losses = None - self.Trace = None - del self.layers_cache[name] - torch.cuda.empty_cache() - - @torch.no_grad() - def ready(self): - if 'scale' not in self.qparams: - return False - return torch.all(self.qparams['scale'] != 0) diff --git a/llmc/compression/quantization/hadamard_utils.py b/llmc/compression/quantization/hadamard_utils.py deleted file mode 100644 index be7182c67..000000000 --- a/llmc/compression/quantization/hadamard_utils.py +++ /dev/null @@ -1,97456 +0,0 @@ 
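A note for readers skimming the removed gptq.py hunk above: process_hessian_and_weights dampens the Hessian diagonal and then chains cholesky -> cholesky_inverse -> cholesky(upper=True) to obtain the upper Cholesky factor of H^-1, which weight_transform consumes as Hinv. A minimal standalone sketch of just that step (my own illustration; the sizes and the 0.01 percdamp below are made up, not taken from any config in this repo):

import torch

torch.manual_seed(0)
cols, nsamples = 8, 64
X = torch.randn(nsamples, cols)
H = (2.0 / nsamples) * X.t() @ X          # running Hessian estimate, as in add_batch

damp = 0.01 * torch.diag(H).mean()        # percdamp * mean(diag(H))
idx = torch.arange(cols)
H[idx, idx] += damp                       # dampen the diagonal before factoring

L = torch.linalg.cholesky(H)              # H = L @ L.T
Hinv = torch.cholesky_inverse(L)          # H^-1 recovered from its Cholesky factor
U = torch.linalg.cholesky(Hinv, upper=True)   # Hinv = U.T @ U; this upper factor is
                                              # what weight_transform indexes as Hinv
assert torch.allclose(U.t() @ U, Hinv, atol=1e-4)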
-import math - -import torch -from loguru import logger - -try: - import fast_hadamard_transform -except Exception: - logger.warning( - 'fast_hadamard_transform not installed.' - 'If you need it, please install it firstly.' - ) - -# from .module_utils import RotateLinear -# Adapted from -# https://github.com/Cornell-RelaxML/quip-sharp/blob/main/lib/utils/matmul_had.py - - -def get_hadK(n, transpose=False): - hadK, K = None, None - if n % 172 == 0: # llama-2-7b up - assert is_pow2(n // 172) - K = 172 - hadK = get_had172().T if transpose else get_had172() - elif n % 156 == 0: # llama-1-30b 3x hidden - assert is_pow2(n // 156) - K = 156 - hadK = get_had156().T if transpose else get_had156() - elif n % 140 == 0: # llama-1-30b intermediate - assert is_pow2(n // 140) - K = 140 - hadK = get_had140().T if transpose else get_had140() - elif n % 108 == 0: # llama-1-13b intermediate - assert is_pow2(n // 108) - K = 108 - hadK = get_had108().T if transpose else get_had108() - elif n % 60 == 0: # llama-1-13b 3x hidden - assert is_pow2(n // 60) - K = 60 - hadK = get_had60().T if transpose else get_had60() - elif n % 52 == 0: # llama-1-13b 1x hidden - assert is_pow2(n // 52) - K = 52 - hadK = get_had52().T if transpose else get_had52() - elif n % 36 == 0: - assert is_pow2(n // 36) - K = 36 - hadK = get_had36().T if transpose else get_had36() - elif n % 28 == 0: # llama-3 up - assert is_pow2(n // 28) - K = 28 - hadK = get_had28().T if transpose else get_had28() - elif n % 40 == 0: - assert is_pow2(n // 40) - K = 40 - hadK = get_had40().T if transpose else get_had40() - elif n % 20 == 0: - assert is_pow2(n // 20) - K = 20 - hadK = get_had20().T if transpose else get_had20() - elif n % 12 == 0: - assert is_pow2(n // 12) - K = 12 - hadK = get_had12().T if transpose else get_had12() - else: - assert is_pow2(n) - K = 1 - - return hadK, K - - -def matmul_hadU(X, transpose=False): - n = X.shape[-1] - hadK, K = get_hadK(n, transpose) - input = X.clone().view(-1, n, 1) - output = input.clone() - while input.shape[1] > K: - input = input.view(input.shape[0], input.shape[1] // 2, 2, input.shape[2]) - output = output.view(input.shape) - output[:, :, 0, :] = input[:, :, 0, :] + input[:, :, 1, :] - output[:, :, 1, :] = input[:, :, 0, :] - input[:, :, 1, :] - output = output.view(input.shape[0], input.shape[1], -1) - (input, output) = (output, input) - del output - - if K > 1: - # Do not explicitly repeat - OOM - # input = torch.bmm( - # hadK.repeat(len(input), 1, 1).to(input.device).to(input.dtype), input) - # Use bcast instead - input = hadK.view(1, K, K).to(input) @ input - - return input.view(X.shape) / torch.tensor(n).sqrt() - - -def matmul_hadUt(X): - return matmul_hadU(X, transpose=True) - - -def random_hadamard_matrix(size, device): - # See https://cornell-relaxml.github.io/quip-sharp/, - # Section "Randomized Hadamard Transformation" - Q = torch.randint(low=0, high=2, size=(size,)).to(torch.float64) - Q = Q * 2 - 1 - Q = torch.diag(Q) - return matmul_hadU(Q).to(device) - - -def matmul_hadU_cuda(X, hadK, K): - n = X.shape[-1] - if K == 1: - return fast_hadamard_transform.hadamard_transform( - X.contiguous(), 1.0 / torch.tensor(n).sqrt() - ) - # if transpose: - # hadK = hadK.T.contiguous() - input = X.view(-1, K, n // K) - input = fast_hadamard_transform.hadamard_transform( - input.contiguous(), 1.0 / torch.tensor(n).sqrt() - ) - input = hadK.to(input.device).to(input.dtype) @ input - return input.reshape(X.shape) - - -def matmul_hadUt_cuda(X, hadK, K): - return matmul_hadU_cuda(X, hadK, K, transpose=True) - - 
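For orientation: the removed matmul_hadU above factors the last dimension n as 2^k * K via get_hadK, runs a radix-2 butterfly down to blocks of size K, multiplies by the hard-coded K x K Hadamard matrix, and scales by 1/sqrt(n) so the transform is orthonormal. A rough standalone sketch of the power-of-two-only case (my own illustration, not code from this repo; the K x K tail block that get_hadK supplies for LLaMA-style dimensions is omitted):

import math
import torch

def fwht(x: torch.Tensor) -> torch.Tensor:
    # Orthonormal fast Walsh-Hadamard transform for n = 2^k, mirroring the
    # butterfly loop in matmul_hadU without the trailing K x K block matmul.
    n = x.shape[-1]
    assert (n & (n - 1)) == 0 and n > 0, 'this sketch only handles powers of two'
    out = x.clone().view(-1, n)
    h = 1
    while h < n:
        out = out.view(-1, n // (2 * h), 2, h)
        a = out[:, :, 0, :].clone()
        b = out[:, :, 1, :].clone()
        out[:, :, 0, :] = a + b
        out[:, :, 1, :] = a - b
        out = out.reshape(-1, n)
        h *= 2
    return (out / math.sqrt(n)).view(x.shape)

x = torch.randn(4, 16)
y = fwht(x)
assert torch.allclose(fwht(y), x, atol=1e-5)   # H is its own inverse up to the 1/n scaling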
-def apply_exact_had_to_linear(module, had_dim=-1, output=False): - # assert isinstance(module, (torch.nn.Linear, RotateLinear)) - in_features, out_features = module.in_features, module.out_features - - if had_dim != -1: - assert is_pow2(had_dim), 'Hadamard dimension must be a power of 2!' - - W_ = module.weight.data - dtype = W_.dtype - dev = W_.device - # init_shape = W_.shape - W_ = W_.float().cuda() - - if had_dim == -1: - if output: - had_K, K = get_hadK(out_features) - W_ = matmul_hadU_cuda(W_.t(), had_K, K).t() - if not output: - had_K, K = get_hadK(in_features) - W_ = matmul_hadU_cuda(W_, had_K, K) - else: - # Apply Hadamard to the last had_dim chunks of the weights - if output: - W_ = W_.t() - transposed_shape = W_.shape - W_ = ( - fast_hadamard_transform.hadamard_transform( - W_.reshape(-1, transposed_shape[-1] // had_dim, had_dim), - scale=1 / math.sqrt(had_dim), - ) - .reshape(transposed_shape) - .t() - ) - else: - raise NotImplementedError('Not implemented (or tested) yet!') - # n = W_.shape[1] - # W_ = hadamard_transform( - # W_.reshape(-1, n // had_dim, had_dim), scale=1 / math.sqrt(had_dim) - # ).reshape(init_shape) - module.weight.data = W_.to(device=dev, dtype=dtype) - - -def is_pow2(n): - return (n & (n - 1) == 0) and (n > 0) - - -# hadamard matrices for had12, had36.pal2, had52,will, -# # had60.pal, had108.pal, had140.pal, had156.will, had172.will: -# http://www.neilsloane.com/hadamard/index.html -def get_had12(): - return torch.FloatTensor( - [ - [+1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], - [+1, +1, -1, +1, -1, -1, -1, +1, +1, +1, -1, +1], - [+1, +1, +1, -1, +1, -1, -1, -1, +1, +1, +1, -1], - [+1, -1, +1, +1, -1, +1, -1, -1, -1, +1, +1, +1], - [+1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, +1], - [+1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1], - [+1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1], - [+1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1], - [+1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1], - [+1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1], - [+1, +1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1], - [+1, -1, +1, -1, -1, -1, +1, +1, +1, -1, +1, +1], - ] - ) - - -def get_had40(): - return torch.FloatTensor( - [ - [ - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - ], - [ - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - 
+1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - ], - [ - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - ], - [ - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - 
+1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - ], - [ - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - ], - [ - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - ], - [ - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - ], - [ - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - ], - [ - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - ], - [ - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - ], - [ - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - ], - [ - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - ], - [ - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - ], - [ - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - ], - [ - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - ], - [ - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - ], - [ - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - ], - [ - +1, - -1, - 
-1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - ], - [ - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - ], - [ - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - ], - [ - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - ], - [ - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - ], - [ - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - ], - ] - ) - - -def get_had20(): - return torch.FloatTensor( - [ - [ - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - ], - [ - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - ], - [ - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - ], - [ - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, 
- -1, - +1, - +1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - ], - ] - ) - - -def get_had28(): - return torch.FloatTensor( - [ - [ - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - ], - [ - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - ], - [ - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - ], - [ - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - ], - [ - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - 
-1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - ], - [ - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - ], - [ - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - ], - [ - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - ], - [ - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - ], - [ - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - ], - [ - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - ], - [ - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - ], - [ - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - ], - [ - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - ], - [ - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - ], - [ - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - ], - ] - ) - - -def get_had36(): - return torch.FloatTensor( - [ - [ - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - ], - [ - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - 
+1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - ], - [ - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - ], - [ - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - ], - [ - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - ], - [ - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - ], - [ - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - ], - [ - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - ], - [ - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - 
-1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - ], - [ - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - ], - [ - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - ], - [ - +1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - ], - [ - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - ], - [ - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - ], - [ - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - ], - [ - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - ], - [ - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - ], - [ - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - ], - [ - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - ], - [ - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - ], - [ - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - ], - [ - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - ], - ] - ) - - -def get_had60(): - return torch.FloatTensor( - [ - [ - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - ], - [ - +1, - +1, - -1, - +1, - -1, - -1, 
- -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, 
- -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - ], - [ - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - ], - [ - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - 
            [... remaining rows of the preceding hardcoded Hadamard matrix (rows of 60 +1/-1 entries) elided ...]
-        ]
-    )
-
-
-def get_had52():
-    return torch.FloatTensor(
-        [
            [... 52 rows x 52 columns of hardcoded +1/-1 entries elided ...]
-        ]
-    )
- -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - ], - [ - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - 
+1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - ], - [ - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - ], - [ - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - 
+1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - ], - [ - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - ], - [ - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - +1, - 
+1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - ], - [ - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - ], - [ - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - ], - [ - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - ], - [ - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - 
-1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - ], - [ - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - 
-1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - ], - [ - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - 
-1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - ], - [ - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - ], - [ - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - 
+1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - ], - [ - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - 
+1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - ], - [ - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - 
+1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - ], - [ - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - 
-1, - -1, - -1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - 
-1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - 
-1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - ], - [ - +1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - ], - [ - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - ], - [ - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - ], - [ - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - ], - [ - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - 
-1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - ], - [ - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - ], - [ - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - 
-1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - ], - [ - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - 
-1, - +1, - ], - [ - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - ], - [ - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - 
-1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - 
-1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - ], - [ - +1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - ], - [ - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - ], - [ - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - 
-1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - ], - [ - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - ], - [ - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - ], - [ - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - 
+1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - ], - [ - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - ], - [ - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - ], - [ - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - ], - ] - ) - - -def get_had140(): - return torch.FloatTensor( - [ - [ - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - 
-1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - ], - [ - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - 
-1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - ], - [ - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - 
+1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - 
-1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - ], - [ - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - ], - [ - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - 
-1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - ], - [ - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - ], - [ - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - ], - [ - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - ], - [ - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - 
+1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - ], - [ - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - 
-        # [… large deleted test fixture elided: the removed hunk is a hardcoded nested
-        #    list of rows, each row a long sequence of +1 / -1 values deleted one value
-        #    per line; only this placeholder is kept in place of the full ±1 matrix literal …]
+1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - ], - [ - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - 
+1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - 
-1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - 
-1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - 
-1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - 
-1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - ], - [ - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - 
-1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - 
-1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - ], - ] - ) - - -def get_had156(): - return torch.FloatTensor( - [ - [ - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - ], - [ - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - ], - [ - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - 
-1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - ], - [ - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - ], - [ - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - ], - [ - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - ], - [ - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - 
-1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - ], - [ - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - ], - [ - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - ], - [ - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - ], - [ - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - 
-1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - ], - [ - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - ], - [ - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - 
-1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - ], - [ - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - ], - [ - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - ], - [ - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - ], - [ - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - 
-1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - ], - [ - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - ], - [ - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - ], - [ - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - ], - [ - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - 
-1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - ], - [ - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - ], - [ - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - ], - [ - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - 
[diff hunk condensed: a long run of contiguous `-` (deletion) lines forming a large nested list literal of ±1 values — dozens of bracketed rows, each a long sequence of `+1,` / `-1,` entries, delimited by `],` / `[` lines. The numeric payload is unchanged in the underlying patch; only its line-by-line listing is collapsed here.]
+1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - ], - [ - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - ], - [ - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - 
-1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - ], - [ - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - ], - [ - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - 
+1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - ], - [ - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - ], - [ - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - ], - [ - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - 
-1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - ], - [ - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - ], - [ - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - ], - [ - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - ], - [ - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - 
-1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - ], - [ - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - ], - [ - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - ], - [ - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - ], - [ - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - 
-1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - ], - [ - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - ], - [ - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - ], - [ - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - 
+1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - ], - [ - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - ], - [ - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - ], - [ - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - ], - [ - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - 
-1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - ], - [ - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - ], - [ - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - ], - [ - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - 
+1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - ], - [ - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - ], - [ - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - ], - [ - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - ], - [ - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - 
+1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - ], - [ - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - ], - [ - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - ], - [ - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - 
+1, - -1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - ], - [ - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - ], - [ - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - ], - [ - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - 
+1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - ], - [ - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - ], - [ - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - ], - [ - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - ], - [ - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - 
+1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - ], - [ - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - ], - [ - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - ], - [ - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - 
-1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - ], - [ - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - ], - [ - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - 
[... remaining rows of hand-coded +1/-1 entries for the preceding hardcoded Hadamard matrix elided ...]
-            ],
-        ]
-    )
-
-
-def get_had172():
-    return torch.FloatTensor(
-        [
[... 172 rows of 172 hand-coded +1/-1 entries each (the hardcoded 172x172 Hadamard matrix literal); the remaining row data continues in the hunk below ...]
+1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - ], - [ - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - ], - [ - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - ], - [ - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - 
+1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - ], - [ - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - ], - [ - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - ], - [ - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - 
-1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - ], - [ - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - ], - [ - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - ], - [ - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - 
-1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - ], - [ - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - ], - [ - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - ], - [ - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - ], - [ - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - 
+1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - ], - [ - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - ], - [ - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - +1, - ], - [ - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - 
+1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - ], - [ - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - ], - [ - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - ], - [ - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - 
-1, - +1, - +1, - +1, - ], - [ - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - ], - [ - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - ], - [ - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - ], - [ - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - 
-1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - ], - [ - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - ], - [ - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - ], - [ - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - 
-1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - ], - [ - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - ], - [ - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - ], - [ - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - ], - [ - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - 
-1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - ], - [ - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - ], - [ - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - ], - [ - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - 
+1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - ], - [ - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - ], - [ - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - ], - [ - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - ], - [ - -1, - -1, - -1, - -1, - +1, - -1, - 
+1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - ], - [ - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - ], - [ - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - ], - [ - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - 
-1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - ], - [ - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - ], - [ - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - ], - [ - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - 
+1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - ], - [ - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - ], - [ - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - ], - [ - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - +1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - +1, - ], - [ - -1, - -1, - +1, - -1, - +1, - +1, - +1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - +1, - +1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - -1, - -1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - +1, - -1, - +1, - -1, - -1, - +1, - +1, - -1, - -1, - -1, - 
[... deleted data lines omitted here: the remainder of a large hardcoded matrix literal whose entries are all +1 or -1 (several dozen more rows, each a long run of +1/-1 values, flattened beyond recovery in this excerpt); only the closing lines of the deleted literal are kept below ...]
-        ],
-    ]
-)
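The block deleted above is a hardcoded square matrix whose entries are all +1 or -1. The file header for this hunk falls outside this excerpt, so its purpose is an inference: the +1/-1 pattern is what a Hadamard-type rotation matrix looks like, the kind used by rotation-based quantization methods. If that is what it is, its defining property (mutually orthogonal rows, so that H @ H.T equals n * I) can be sanity-checked with a few lines of PyTorch. The helper below is hypothetical and is not part of the diff.

# Hypothetical check (not from the deleted file): does a +/-1 matrix have
# mutually orthogonal rows, i.e. H @ H.T == n * I, as a Hadamard-type matrix must?
import torch

def looks_hadamard(H: torch.Tensor, atol: float = 1e-4) -> bool:
    n, m = H.shape
    if n != m or not torch.all(H.abs() == 1):
        return False
    return torch.allclose(H @ H.T, n * torch.eye(n, dtype=H.dtype), atol=atol)

# Trivial 2x2 example; the deleted matrix would be checked the same way.
H2 = torch.tensor([[1.0, 1.0], [1.0, -1.0]])
assert looks_hadamard(H2)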
diff --git a/llmc/compression/quantization/hqq.py b/llmc/compression/quantization/hqq.py
deleted file mode 100644
index 0077c401b..000000000
--- a/llmc/compression/quantization/hqq.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import gc
-
-import torch
-import torch.nn as nn
-from loguru import logger
-
-from llmc.utils.registry_factory import ALGO_REGISTRY
-
-from .base_blockwise_quantization import BaseBlockwiseQuantization
-
-
-@ALGO_REGISTRY
-class HQQ(BaseBlockwiseQuantization):
-    def __init__(self, model, quant_config, input, padding_mask, config):
-        super().__init__(model, quant_config, input, padding_mask, config)
-        self.add_quant_config()
-
-    @torch.no_grad()
-    def add_quant_config(self):
-        self.lp_norm = self.quant_config['special']['lp_norm']
-        self.beta = self.quant_config['special']['beta']
-        self.kappa = self.quant_config['special']['kappa']
-        self.iters = self.quant_config['special']['iters']
-        self.axis = self.quant_config['special']['axis']
-        if self.lp_norm == 1:
-            self.shrink_op = lambda x, beta: torch.sign(x) * torch.nn.functional.relu(
-                torch.abs(x) - 1.0 / self.beta
-            )
-        else:
-            self.shrink_op = lambda x, beta, p=self.lp_norm: torch.sign(
-                x
-            ) * torch.nn.functional.relu(
-                torch.abs(x) - (1.0 / self.beta) * torch.pow(torch.abs(x), p - 1)
-            )
-
-    @torch.no_grad()
-    def optimize_weights_proximal(self, W_f, scales, zeros, qmax, qmin):
-        best_error = 1e4
-        current_beta = self.beta
-        current_kappa = self.kappa
-        scales = 1 / scales
-        for i in range(self.iters):
-            W_q = torch.round(W_f * scales + zeros).clamp(qmin, qmax)
-            W_r = (W_q - zeros) / scales
-            W_e = self.shrink_op(W_f - W_r, current_beta)
-
-            zeros = torch.mean(W_q - (W_f - W_e) * scales, axis=-1, keepdim=True)
-            current_beta *= current_kappa
-            current_error = float(torch.abs(W_f - W_r).mean())
-
-            logger.info(f'iter : {i}, error : {current_error}')
-
-            if current_error < best_error:
-                best_error = current_error
-            else:
-                break
-
-        torch.cuda.empty_cache()
-        scales = 1 / scales
-
-        return scales, zeros
-
-    @torch.no_grad()
-    def block_opt(self, block):
-        block = block.cuda()
-        named_linears = self.model.get_block_linears(block)
-        logger.info(f'named_linears: {named_linears}')
-
-        for name in named_linears:
-            logger.info(f'Optimize weights proximal of {name}')
-            layer = named_linears[name]
-
-            tensor = layer.weight.data.float()
-            if self.axis == 0:
-                tensor = tensor.T
-            (
-                tensor,
-                org_scales,
-                org_zeros,
-                qmax,
-                qmin,
-            ) = self.wquantizer.get_tensor_qparams(tensor)
-
-            best_scales, best_zeros = self.optimize_weights_proximal(
-                tensor, org_scales, org_zeros, qmax, qmin
-            )
-            layer.register_buffer('buf_scales', best_scales)
-            layer.register_buffer('buf_zeros', best_zeros)
-            layer.register_buffer('buf_qmax', torch.tensor(qmax))
-            layer.register_buffer('buf_qmin', torch.tensor(qmin))
-
-        block = block.cpu()
-        gc.collect()
-        torch.cuda.empty_cache()
-
-    def w_qdq(self, module, wquantizer):
-        args = {}
-        if self.axis == 0:
-            args['dim'] = 'ic'
-        args['scales'] = module.buf_scales
-        args['zeros'] = module.buf_zeros
-        args['qmax'] = module.buf_qmax
-        args['qmin'] = module.buf_qmin
-
-        return wquantizer.fake_quant_weight_static(module.weight, args)
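The HQQ (half-quadratic quantization) class deleted above is built around one proximal loop: quantize with the current scale and zero-point, apply an Lp shrinkage operator to the reconstruction error, re-estimate the zero-point from the shrunken error, and anneal beta by kappa until the error stops improving. A minimal standalone sketch of that loop follows, using the same update equations but with illustrative names and defaults and none of the llmc plumbing (registry, wquantizer, registered buffers); it is a reading aid, not code from this repository.

# Standalone sketch of the proximal update in the deleted optimize_weights_proximal();
# names and default hyperparameters here are illustrative, not llmc APIs.
import torch

def hqq_proximal(w, scales, zeros, qmin, qmax, beta=10.0, kappa=1.01, iters=20, lp=0.7):
    # Lp shrinkage, mirroring the lp_norm != 1 branch of the deleted shrink_op.
    shrink = lambda x, b: torch.sign(x) * torch.relu(
        x.abs() - (1.0 / b) * x.abs().pow(lp - 1)
    )
    inv_scales = 1.0 / scales        # the deleted code also works with 1/scales internally
    best_err = float('inf')
    for _ in range(iters):
        w_q = torch.round(w * inv_scales + zeros).clamp(qmin, qmax)
        w_r = (w_q - zeros) / inv_scales                     # dequantized reconstruction
        w_e = shrink(w - w_r, beta)                          # shrunken residual
        zeros = torch.mean(w_q - (w - w_e) * inv_scales, dim=-1, keepdim=True)
        beta *= kappa
        err = (w - w_r).abs().mean().item()
        if err >= best_err:                                  # stop once error stops improving
            break
        best_err = err
    return 1.0 / inv_scales, zeros

# Example: per-row asymmetric 4-bit parameters for a random weight matrix.
w = torch.randn(8, 64)
scales = (w.amax(dim=-1, keepdim=True) - w.amin(dim=-1, keepdim=True)) / 15
zeros = -w.amin(dim=-1, keepdim=True) / scales
opt_scales, opt_zeros = hqq_proximal(w, scales, zeros, qmin=0, qmax=15)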
gc.collect() - torch.cuda.empty_cache() - - def w_qdq(self, module, wquantizer): - args = {} - if self.axis == 0: - args['dim'] = 'ic' - args['scales'] = module.buf_scales - args['zeros'] = module.buf_zeros - args['qmax'] = module.buf_qmax - args['qmin'] = module.buf_qmin - - return wquantizer.fake_quant_weight_static(module.weight, args) diff --git a/llmc/compression/quantization/kernel.py b/llmc/compression/quantization/kernel.py deleted file mode 100755 index 7bd070a38..000000000 --- a/llmc/compression/quantization/kernel.py +++ /dev/null @@ -1,242 +0,0 @@ -import torch -import triton -import triton.language as tl -from triton import Config - - -@triton.jit -def act_quant_kernel(x_ptr, y_ptr, s_ptr, BLOCK_SIZE: tl.constexpr): - """Quantizes the input tensor `x_ptr` and stores the result in `y_ptr` and - the scaling factor in `s_ptr`. - - Args: - x_ptr (triton.Pointer): Pointer to the input tensor. - y_ptr (triton.Pointer): Pointer to the output tensor where quantized values will be stored. - s_ptr (triton.Pointer): Pointer to the output tensor where scaling factors will be stored. - BLOCK_SIZE (tl.constexpr): The size of the block to be processed by each program instance. - - Returns: - None - """ - pid = tl.program_id(axis=0) - offs = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) - x = tl.load(x_ptr + offs).to(tl.float32) - s = tl.max(tl.abs(x)) / 448.0 - y = x / s - y = y.to(y_ptr.dtype.element_ty) - tl.store(y_ptr + offs, y) - tl.store(s_ptr + pid, s) - - -def act_quant(x, block_size=128): - """Quantizes the input tensor `x` using block-wise quantization. - - Args: - x (torch.Tensor): The input tensor to be quantized. Must be contiguous and - its last dimension size must be divisible by `block_size`. - block_size (int, optional): The size of the blocks to be used for quantization. - Default is 128. - - Returns: - Tuple[torch.Tensor, torch.Tensor]: A tuple containing: - - The quantized tensor with dtype `torch.float8_e4m3fn`. - - A tensor of scaling factors with dtype `torch.float32`. - """ - assert x.is_contiguous(), 'Input tensor must be contiguous' - assert ( - x.size(-1) % block_size == 0 - ), f'Last dimension size must be divisible by block_size (block_size={block_size})' - y = torch.empty_like(x, dtype=torch.float8_e4m3fn) - s = x.new_empty(*x.size()[:-1], x.size(-1) // block_size, dtype=torch.float32) - grid = lambda meta: (triton.cdiv(x.numel(), meta['BLOCK_SIZE']),) # noqa - act_quant_kernel[grid](x, y, s, BLOCK_SIZE=block_size) - return y, s - - -@triton.jit -def weight_cast_to_fp8_kernel(x_ptr, y_ptr, s_ptr, M, N, BLOCK_SIZE: tl.constexpr): - pid_m = tl.program_id(axis=0) - pid_n = tl.program_id(axis=1) - n = tl.cdiv(N, BLOCK_SIZE) - offs_m = pid_m * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) - offs_n = pid_n * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) - offs = offs_m[:, None] * N + offs_n[None, :] - mask = (offs_m[:, None] < M) & (offs_n[None, :] < N) - x = tl.load(x_ptr + offs, mask=mask).to(tl.float32) - s = tl.max(tl.abs(x)) / 448. 
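# Illustrative sketch (not part of the original file): the 448.0 divisor above is the
# largest finite value of torch.float8_e4m3fn, so each block is scaled to fit the fp8
# range. A plain-PyTorch reference of the same idea (requires a build with float8
# support, PyTorch >= 2.1; the shapes below are assumptions for demonstration):
import torch

_block = torch.randn(128, 128)                      # one (BLOCK_SIZE x BLOCK_SIZE) tile
_scale = _block.abs().amax() / 448.0                # per-block scale, as in the kernel
_q = (_block / _scale).to(torch.float8_e4m3fn)      # cast to fp8 storage
_dq = _q.to(torch.float32) * _scale                 # dequantize for comparison
print((_block - _dq).abs().max())                   # small block-wise reconstruction error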
- y = x / s - y = y.to(y_ptr.dtype.element_ty) - tl.store(y_ptr + offs, y, mask=mask) - tl.store(s_ptr + pid_m * n + pid_n, s) - - -def weight_cast_to_fp8(x, block_size=128): - assert x.is_contiguous() - assert x.dim() == 2 - M, N = x.size() - y = torch.empty_like(x, dtype=torch.float8_e4m3fn) - sM = torch.tensor(1.0 * M / block_size).ceil().int() - sN = torch.tensor(1.0 * N / block_size).ceil().int() - s = x.new_empty(sM, sN, dtype=torch.float32) - grid = lambda meta: (triton.cdiv(M, meta['BLOCK_SIZE']), triton.cdiv(N, meta['BLOCK_SIZE'])) # noqa - weight_cast_to_fp8_kernel[grid](x, y, s, M, N, BLOCK_SIZE=block_size) - return y, s - - -@triton.jit -def weight_cast_to_bf16_kernel(x_ptr, s_ptr, y_ptr, M, N, BLOCK_SIZE: tl.constexpr): - """Dequantizes weights using the provided scaling factors and stores the - result. - - Args: - x_ptr (tl.pointer): Pointer to the quantized weights. - s_ptr (tl.pointer): Pointer to the scaling factors. - y_ptr (tl.pointer): Pointer to the output buffer for dequantized weights. - M (int): Number of rows in the weight matrix. - N (int): Number of columns in the weight matrix. - BLOCK_SIZE (tl.constexpr): Size of the block for tiling. - - Returns: - None - """ - pid_m = tl.program_id(axis=0) - pid_n = tl.program_id(axis=1) - n = tl.cdiv(N, BLOCK_SIZE) - offs_m = pid_m * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) - offs_n = pid_n * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) - offs = offs_m[:, None] * N + offs_n[None, :] - mask = (offs_m[:, None] < M) & (offs_n[None, :] < N) - x = tl.load(x_ptr + offs, mask=mask).to(tl.float32) - s = tl.load(s_ptr + pid_m * n + pid_n) - y = x * s - tl.store(y_ptr + offs, y, mask=mask) - - -def weight_cast_to_bf16(x, s, block_size=128): - """Dequantizes the given weight tensor using the provided scale tensor. - - Args: - x (torch.Tensor): The quantized weight tensor of shape (M, N). - s (torch.Tensor): The scale tensor of shape (M, N). - block_size (int, optional): The block size to use for dequantization. Defaults to 128. - - Returns: - torch.Tensor: The dequantized weight tensor of the same shape as `x`. - - Raises: - AssertionError: If `x` or `s` are not contiguous or if their dimensions are not 2. - """ - assert x.is_contiguous() and s.is_contiguous(), 'Input tensors must be contiguous' - assert x.dim() == 2 and s.dim() == 2, 'Input tensors must have 2 dimensions' - M, N = x.size() - y = torch.empty_like(x, dtype=torch.get_default_dtype()) - grid = lambda meta: ( # noqa - triton.cdiv(M, meta['BLOCK_SIZE']), - triton.cdiv(N, meta['BLOCK_SIZE']), - ) - weight_cast_to_bf16_kernel[grid](x, s, y, M, N, BLOCK_SIZE=block_size) - return y - - -fp8_gemm_configs = [ - Config( - {'BLOCK_SIZE_M': block_m, 'BLOCK_SIZE_N': block_n, 'BLOCK_SIZE_K': 128}, - num_stages=num_stages, - num_warps=8, - ) - for block_m in [16, 32, 64] - for block_n in [32, 64, 128] - for num_stages in [3, 4, 5, 6] -] - - -@triton.autotune(configs=fp8_gemm_configs, key=['N', 'K']) -@triton.jit -def fp8_gemm_kernel( - a_ptr, - b_ptr, - c_ptr, - a_s_ptr, - b_s_ptr, - M, - N: tl.constexpr, - K: tl.constexpr, - BLOCK_SIZE_M: tl.constexpr, - BLOCK_SIZE_N: tl.constexpr, - BLOCK_SIZE_K: tl.constexpr, -): - """Performs a matrix multiplication operation on FP8 matrices with scaling - factors. - - Args: - a_ptr (tl.tensor): Pointer to the first input matrix A. - b_ptr (tl.tensor): Pointer to the second input matrix B. - c_ptr (tl.tensor): Pointer to the output matrix C. - a_s_ptr (tl.tensor): Pointer to the scaling factors for matrix A. 
- b_s_ptr (tl.tensor): Pointer to the scaling factors for matrix B. - M (int): Number of rows in matrix A and C. - N (tl.constexpr): Number of columns in matrix B and C. - K (tl.constexpr): Number of columns in matrix A and rows in matrix B. - BLOCK_SIZE_M (tl.constexpr): Block size for the M dimension. - BLOCK_SIZE_N (tl.constexpr): Block size for the N dimension. - BLOCK_SIZE_K (tl.constexpr): Block size for the K dimension. - - Returns: - None - """ - pid_m = tl.program_id(axis=0) - pid_n = tl.program_id(axis=1) - k = tl.cdiv(K, BLOCK_SIZE_K) - offs_m = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M - offs_n = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N - offs_k = tl.arange(0, BLOCK_SIZE_K) - a_ptrs = a_ptr + offs_m[:, None] * K + offs_k[None, :] - b_ptrs = b_ptr + offs_n[None, :] * K + offs_k[:, None] - a_s_ptrs = a_s_ptr + offs_m * k - b_s_ptrs = b_s_ptr + (offs_n // BLOCK_SIZE_K) * k - - accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) - for i in range(k): - a = tl.load(a_ptrs, mask=offs_k[None, :] < K - i * BLOCK_SIZE_K, other=0.0) - b = tl.load(b_ptrs, mask=offs_k[:, None] < K - i * BLOCK_SIZE_K, other=0.0) - a_s = tl.load(a_s_ptrs) - b_s = tl.load(b_s_ptrs) - accumulator += tl.dot(a, b) * a_s[:, None] * b_s[None, :] - a_ptrs += BLOCK_SIZE_K - b_ptrs += BLOCK_SIZE_K - a_s_ptrs += 1 - b_s_ptrs += 1 - c = accumulator.to(c_ptr.dtype.element_ty) - offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) - offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) - c_ptrs = c_ptr + offs_m[:, None] * N + offs_n[None, :] - mask = (offs_m[:, None] < M) & (offs_n[None, :] < N) - tl.store(c_ptrs, c, mask=mask) - - -def fp8_gemm(a: torch.Tensor, a_s: torch.Tensor, b: torch.Tensor, b_s: torch.Tensor): - """Perform a matrix multiplication using FP8 precision. - - Args: - a (torch.Tensor): The first input matrix, must be contiguous. - a_s (torch.Tensor): The scaling factor for the first input matrix, must be contiguous. - b (torch.Tensor): The second input matrix, must be contiguous. - b_s (torch.Tensor): The scaling factor for the second input matrix, must be contiguous. - - Returns: - torch.Tensor: The result of the matrix multiplication. 
- """ - assert a.is_contiguous() and b.is_contiguous(), 'Input tensors must be contiguous' - assert ( - a_s.is_contiguous() and b_s.is_contiguous() - ), 'Scaling factor tensors must be contiguous' - K = a.size(-1) - M = a.numel() // K - N = b.size(0) - c = a.new_empty(*a.size()[:-1], N, dtype=torch.get_default_dtype()) - grid = lambda META: ( # noqa - triton.cdiv(M, META['BLOCK_SIZE_M']), - triton.cdiv(N, META['BLOCK_SIZE_N']), - ) - fp8_gemm_kernel[grid](a, b, c, a_s, b_s, M, N, K) - return c diff --git a/llmc/compression/quantization/kvquant.py b/llmc/compression/quantization/kvquant.py deleted file mode 100644 index 32c2de5be..000000000 --- a/llmc/compression/quantization/kvquant.py +++ /dev/null @@ -1,289 +0,0 @@ -import torch -from loguru import logger -from transformers import DynamicCache - -from llmc.utils.registry_factory import KV_REGISTRY - -from .quant import FloatQuantizer, IntegerQuantizer - - -@KV_REGISTRY.register('Naive') -class NaiveQuantKVCache(DynamicCache): - def __init__(self, quant_type, kvquant_cfg, num_hidden_layers, num_samples=128, bsz=1): - super().__init__() - - assert kvquant_cfg.granularity in ['per_token', 'per_tensor', 'per_group'] - self.num_hidden_layers, self.num_samples, self.bsz = ( - num_hidden_layers, - num_samples, - bsz, - ) - if quant_type == 'int-quant': - self.kvquantizer = IntegerQuantizer(**kvquant_cfg) - elif quant_type == 'float-quant': - self.kvquantizer = FloatQuantizer(**kvquant_cfg) - - self.kvquant_cfg = kvquant_cfg - self.static = kvquant_cfg.get('static', False) - self._quantized_key_cache = [] - self._quantized_value_cache = [] - self.use_org_kv = False - - if self.static: - self._reset_buffers() - self.calib_key_cache = [ - [] for i in range(self.num_hidden_layers) - ] - self.calib_value_cache = [ - [] for i in range(self.num_hidden_layers) - ] - self.calib = True - else: - self.calib = False - - def update( - self, - key_states, - value_states, - layer_idx, - cache_kwargs, - ): - if self.use_org_kv: - return super().update(key_states, value_states, layer_idx, cache_kwargs) - elif self.static and self.calib: - self._calibration(layer_idx, key_states, value_states) - keys_to_return, values_to_return = key_states, value_states - else: - if layer_idx == 0: - self._seen_tokens += key_states.shape[-2] - - if len(self._quantized_key_cache) <= layer_idx: - # Prefill - q_keys = self._quantize(key_states.contiguous(), layer_idx, is_key=True) - q_values = self._quantize( - value_states.contiguous(), layer_idx, is_key=False - ) - self._quantized_key_cache.append(q_keys) - self._quantized_value_cache.append(q_values) - keys_to_return = self._dequantize(q_keys) - values_to_return = self._dequantize(q_values) - else: - # Decode - dequant_key = self._dequantize(self._quantized_key_cache[layer_idx]) - dequant_value = self._dequantize(self._quantized_value_cache[layer_idx]) - keys_to_return = [dequant_key, key_states] - values_to_return = [dequant_value, value_states] - - keys_to_return = torch.cat(keys_to_return, dim=-2) - values_to_return = torch.cat(values_to_return, dim=-2) - - self._quantized_key_cache[layer_idx] = self._quantize( - keys_to_return.contiguous(), layer_idx, is_key=True - ) - self._quantized_value_cache[layer_idx] = self._quantize( - values_to_return.contiguous(), layer_idx, is_key=False - ) - - return keys_to_return, values_to_return - - def _check_pass_all_calib_data(self, layer_idx): - return ( - self.bsz == 1 and len(self.calib_value_cache[layer_idx]) == self.num_samples - ) or ( - self.bsz == -1 - and 
self.calib_value_cache[layer_idx][0].shape[0] == self.num_samples - ) - - def _calibration(self, layer_idx, key_states, value_states): - # Calibration data can be provided through the prompt or - # the preprocessed decode data. - # Therefore, calibration occurs only during the prefill stage. - self.calib_key_cache[layer_idx].append(key_states) - self.calib_value_cache[layer_idx].append(value_states) - - if self._check_pass_all_calib_data(layer_idx): - # Get and store calibration parameters for keys and values - for data, buffer, scale_buffer, zero_buffer, qmin_buffer, qmax_buffer in [ - ( - self.calib_key_cache[layer_idx], - self.k_scales_buffer, - self.k_scales_buffer, - self.k_zeros_buffer, - self.k_qmin_buffer, - self.k_qmax_buffer, - ), - ( - self.calib_value_cache[layer_idx], - self.v_scales_buffer, - self.v_scales_buffer, - self.v_zeros_buffer, - self.v_qmin_buffer, - self.v_qmax_buffer, - ), - ]: - scales, zeros, qmin, qmax = self.get_qparams(data) - ( - scale_buffer[layer_idx], - zero_buffer[layer_idx], - qmin_buffer[layer_idx], - qmax_buffer[layer_idx], - ) = (scales, zeros, qmin, qmax) - - # Clear the calibration caches - self.calib_key_cache[layer_idx].clear() - self.calib_value_cache[layer_idx].clear() - - def _quantize(self, tensor, layer_idx, is_key): - org_shape = tensor.shape - tensor = self.kvquantizer.reshape_tensor(tensor) - - if self.static: - scales = ( - self.k_scales_buffer[layer_idx] - if is_key - else self.v_scales_buffer[layer_idx] - ) - zeros = ( - self.k_zeros_buffer[layer_idx] - if is_key - else self.v_zeros_buffer[layer_idx] - ) - qmax = ( - self.k_qmax_buffer[layer_idx] - if is_key - else self.v_qmax_buffer[layer_idx] - ) - qmin = ( - self.k_qmin_buffer[layer_idx] - if is_key - else self.v_qmin_buffer[layer_idx] - ) - else: - tensor_range = self.kvquantizer.get_tensor_range(tensor, {}) - scales, zeros, qmax, qmin = self.kvquantizer.get_qparams( - tensor_range, tensor.device - ) - - q_tensor = self.kvquantizer.quant(tensor, scales, zeros, qmax, qmin) - q_tensor = self.kvquantizer.restore_tensor(q_tensor, org_shape) - - q_tensors = { - 'q_tensor': q_tensor, - 'scales': scales, - 'zeros': zeros, - } - - return q_tensors - - def _dequantize(self, q_tensors): - q_tensor = q_tensors['q_tensor'] - scales = q_tensors['scales'] - zeros = q_tensors['zeros'] - org_shape = q_tensor.shape - q_tensor = self.kvquantizer.reshape_tensor(q_tensor) - qdq_tensor = self.kvquantizer.dequant(q_tensor, scales, zeros) - qdq_tensor = self.kvquantizer.restore_tensor(qdq_tensor, org_shape) - return qdq_tensor - - def _reset_buffers(self): - self.k_scales_buffer = [torch.zeros(0)] * self.num_hidden_layers - self.k_zeros_buffer = [torch.zeros(0)] * self.num_hidden_layers - self.k_qmin_buffer = [0] * self.num_hidden_layers - self.k_qmax_buffer = [0] * self.num_hidden_layers - - self.v_scales_buffer = [torch.zeros(0)] * self.num_hidden_layers - self.v_zeros_buffer = [torch.zeros(0)] * self.num_hidden_layers - self.v_qmin_buffer = [0] * self.num_hidden_layers - self.v_qmax_buffer = [0] * self.num_hidden_layers - - def _reset_states(self): - self._quantized_key_cache = [] - self._quantized_value_cache = [] - self.key_cache = [] - self.value_cache = [] - self._seen_tokens = 0 - - def get_qparams(self, tensor): - scales_list, zeros_list, qmin_list, qmax_list = ( - self.kvquantizer.get_batch_tensors_qparams(tensor) - ) - scales, zeros, qmin, qmax = ( - scales_list[0], - zeros_list[0], - qmin_list[0], - qmax_list[0], - ) - return scales, zeros, qmin, qmax - - def get_seq_length(self, 
layer_idx=0): - if self.use_org_kv: - return super().get_seq_length() - if len(self._quantized_key_cache) <= layer_idx: - return 0 - return self._seen_tokens if layer_idx == 0 else self._seen_tokens - 1 - - -@KV_REGISTRY.register('Kivi') -class KiviQuantKVCache(NaiveQuantKVCache): - def __init__(self, quant_type, kvquant_cfg, num_hidden_layers, num_samples=128, bsz=1): - super().__init__(quant_type, kvquant_cfg, num_hidden_layers, num_samples, bsz) - assert not self.static, 'Only support dynamic quantization for KIVI' - self.residual_length = kvquant_cfg.get('residual_length', 128) - - def update( - self, - key_states, - value_states, - layer_idx, - cache_kwargs, - ): - if self.use_org_kv: - return super().update(key_states, value_states, layer_idx, cache_kwargs) - else: - if layer_idx == 0: - self._seen_tokens += key_states.shape[-2] - - if len(self.key_cache) <= layer_idx: - self._quantized_key_cache.append(self._quantize(key_states.contiguous(), - layer_idx, - is_key=True)) - self._quantized_value_cache.append(self._quantize(value_states.contiguous(), - layer_idx, - is_key=False)) - self.key_cache.append(torch.zeros(0, - dtype=key_states.dtype, - device=key_states.device)) - self.value_cache.append(torch.zeros(0, - dtype=key_states.dtype, - device=key_states.device)) - keys_to_return, values_to_return = key_states, value_states - else: - dequant_key = self._dequantize(self._quantized_key_cache[layer_idx]) - dequant_value = self._dequantize(self._quantized_value_cache[layer_idx]) - keys_to_return = [dequant_key, self.key_cache[layer_idx], key_states] - values_to_return = [dequant_value, self.value_cache[layer_idx], value_states] - - keys_to_return = torch.cat(keys_to_return, dim=-2) - values_to_return = torch.cat(values_to_return, dim=-2) - if ( - self.key_cache[layer_idx].dim() == 4 - and self.key_cache[layer_idx].shape[-2] + 1 >= self.residual_length - ): - self._quantized_key_cache[layer_idx] = \ - self._quantize(keys_to_return.contiguous(), layer_idx, is_key=True) - self._quantized_value_cache[layer_idx] = self._quantize( - values_to_return.contiguous(), layer_idx, is_key=False - ) - self.key_cache[layer_idx] = torch.zeros(0, - dtype=key_states.dtype, - device=key_states.device) - self.value_cache[layer_idx] = torch.zeros(0, - dtype=key_states.dtype, - device=key_states.device) - else: - self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], - dim=-2) - self.value_cache[layer_idx] = \ - torch.cat([self.value_cache[layer_idx], value_states], dim=-2) - - return keys_to_return, values_to_return diff --git a/llmc/compression/quantization/llmint8.py b/llmc/compression/quantization/llmint8.py deleted file mode 100644 index 116b8843e..000000000 --- a/llmc/compression/quantization/llmint8.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch -from loguru import logger - -from llmc.utils.registry_factory import ALGO_REGISTRY - -from .base_blockwise_quantization import BaseBlockwiseQuantization -from .module_utils import FakeQuantLinear - - -@ALGO_REGISTRY -class LlmInt8(BaseBlockwiseQuantization): - def __init__(self, model, quant_config, input, padding_mask, config): - super().__init__(model, quant_config, input, padding_mask, config) - self.add_quant_config() - - @torch.no_grad() - def add_quant_config(self): - self.threshold = self.quant_config['special']['threshold'] - - @torch.no_grad() - def block_opt(self, *opt_kwargs): - pass - - @torch.no_grad() - def get_outlier_indices(self, act): - tmp = act.abs().amax(dim=1) - - fp_indices = torch.where(tmp >= self.threshold)[1] 
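# Illustrative sketch (not part of the original file): how the magnitude threshold above
# splits hidden channels into fp (outlier) and int (quantizable) sets, LLM.int8()-style.
# The threshold 6.0 and the toy activation below are assumptions for demonstration only.
import torch

_act = torch.randn(1, 4, 8)                         # (batch, seq_len, hidden)
_act[..., 3] += 30.0                                # force one channel to be an outlier
_col_max = _act.abs().amax(dim=1)                   # per-channel max over the sequence
_fp_idx = torch.where(_col_max >= 6.0)[1]           # channels kept in higher precision
_keep = torch.isin(torch.arange(_act.shape[2]), _fp_idx, invert=True)
_int_idx = torch.arange(_act.shape[2])[_keep]       # channels routed to int quantization
print(_fp_idx, _int_idx)                            # e.g. tensor([3]) and the rest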
- all_idx = torch.arange(act.shape[2]).to(act.device) - - tensor_is_not_in = torch.isin(all_idx, fp_indices, invert=True) - int_indices = all_idx[tensor_is_not_in] - - return int_indices, fp_indices - - @torch.no_grad() - def w_qdq(self, module, wquantizer): - weight = module.weight - args = {} - args['int_indices'] = module.buf_int_ids - args['fp_indices'] = module.buf_fp_ids - - weight = self.wquantizer.fake_quant_weight_dynamic(weight, args) - - return weight - - @torch.no_grad() - def a_qdq(self, act, module, aquantizer): - args = {} - - int_indices, fp_indices = self.get_outlier_indices(act) - - args['int_indices'] = int_indices - args['fp_indices'] = fp_indices - - module.register_buffer('buf_int_ids', int_indices) - module.register_buffer('buf_fp_ids', fp_indices) - - act = self.aquantizer.fake_quant_act_dynamic(act, args) - - return act - - @torch.no_grad() - def deploy(self, quant_format): - assert not quant_format != 'fake_quant' - logger.info(f'-- deploy_{quant_format}_model start --') - logger.info(f'quant_config : {self.quant_config}') - - self.model.replace_language_module_all( - FakeQuantLinear, - self.get_replacement_params( - mode='fake_quant', w_only=self.w_only, name=None - ), - ) - logger.info(f'-- deploy_{quant_format}_model done --') diff --git a/llmc/compression/quantization/module_utils.py b/llmc/compression/quantization/module_utils.py deleted file mode 100755 index 1c0e6e455..000000000 --- a/llmc/compression/quantization/module_utils.py +++ /dev/null @@ -1,1231 +0,0 @@ -import math -from functools import partial - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from loguru import logger -from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS - -from .utils import is_fp8_supported_gpu - -if is_fp8_supported_gpu(): - from .kernel import act_quant, fp8_gemm, weight_cast_to_bf16 - USE_FP8GEMM_TRITON_KERNEL = True - logger.info('Successfully imported Triton kernel.') -else: - USE_FP8GEMM_TRITON_KERNEL = False - from .quant import weight_cast_to_bf16 - logger.info( - 'Triton kernel not available: non-Hopper GPU detected.\n' - 'Using LLMC Quantizer implementation instead.' - ) - -try: - from vllm import _custom_ops as ops -except ModuleNotFoundError: - ops = None - -try: - import fast_hadamard_transform - - from .hadamard_utils import matmul_hadU_cuda -except Exception: - logger.warning( - 'fast_hadamard_transform not installed. ' - 'If you need it, please install it firstly.' 
- ) - - -def block_wise_fp8_forward_func(x, w, w_scale, block_size, bias): - x, scale = act_quant(x, block_size) - y = fp8_gemm(x, scale, w, w_scale).to(torch.bfloat16) - if bias is not None: - y += bias - return y - - -class FakeAffineLayerNorm(nn.Module): - def __init__(self, norm, shape): - super().__init__() - self.register_parameter('weight', nn.Parameter(torch.ones(shape, dtype=torch.float))) - self.register_parameter('bias', nn.Parameter(torch.ones(shape, dtype=torch.float))) - self.norm = norm - - def forward(self, x): - return self.norm(x) - - def extra_repr(self): - return f'affine=True (emulated), shape={self.weight.shape}' - - -class LlmcWanTransformerBlock(nn.Module): - def __init__(self, module): - super().__init__() - - self.affine_norm1 = FakeAffineLayerNorm(module.norm1, module.scale_shift_table.shape[-1]) - self.attn1 = module.attn1 - - self.attn2 = module.attn2 - self.norm2 = module.norm2 - - self.affine_norm3 = FakeAffineLayerNorm(module.norm1, module.scale_shift_table.shape[-1]) - self.ffn = module.ffn - self.scale_shift_table = module.scale_shift_table - - def forward( - self, - hidden_states, - encoder_hidden_states, - temb, - rotary_emb, - ): - shift_msa, scale_msa, gate_msa, c_shift_msa, c_scale_msa, c_gate_msa = ( - self.scale_shift_table + temb - ).chunk(6, dim=1) - - # 1. Self-attention - norm1_weight = (1 + scale_msa) * self.affine_norm1.weight - norm1_bias = shift_msa * self.affine_norm1.bias - - norm_hidden_states = ( - self.affine_norm1(hidden_states.float()) * norm1_weight + norm1_bias - ).type_as(hidden_states) - attn_output = self.attn1( - hidden_states=norm_hidden_states, rotary_emb=rotary_emb - ) - hidden_states = (hidden_states.float() + attn_output * gate_msa).type_as( - hidden_states - ) - - # 2. Cross-attention - norm_hidden_states = self.norm2(hidden_states.float()).type_as(hidden_states) - attn_output = self.attn2( - hidden_states=norm_hidden_states, - encoder_hidden_states=encoder_hidden_states, - ) - hidden_states = hidden_states + attn_output - - # 3. 
Feed-forward - norm3_weight = (1 + c_scale_msa) * self.affine_norm3.weight - norm3_bias = c_shift_msa * self.affine_norm3.bias - - norm_hidden_states = ( - self.affine_norm3(hidden_states.float()) * norm3_weight + norm3_bias - ).type_as(hidden_states) - ff_output = self.ffn(norm_hidden_states) - hidden_states = ( - hidden_states.float() + ff_output.float() * c_gate_msa - ).type_as(hidden_states) - - return hidden_states - - @classmethod - @torch.no_grad() - def new(cls, module): - new_module = cls(module) - return new_module - - -class VllmQuantLinearInt8(nn.Module): - def __init__(self, in_features, out_features, bias=True): - super().__init__() - self.in_features = in_features - self.out_features = out_features - - self.register_buffer('weight', torch.empty((out_features, in_features), dtype=torch.int8)) - self.register_buffer('weight_scale', torch.empty((out_features, 1), dtype=torch.float32)) - - if bias: - self.register_buffer('bias', torch.empty(out_features, dtype=torch.bfloat16)) - else: - self.register_buffer('bias', None) - - def act_quant_func(self, x): - input_tensor_quant, input_tensor_scale, _ \ - = ops.scaled_int8_quant(x, scale=None, azp=None, symmetric=True) - return input_tensor_quant, input_tensor_scale - - def forward(self, input_tensor): - input_tensor = input_tensor.squeeze(0) - shape = (input_tensor.shape[0], self.weight.shape[0]) - dtype = input_tensor.dtype - device = input_tensor.device - output_tensor = torch.empty(shape, dtype=dtype, device=device, requires_grad=False) - - input_tensor_quant, input_tensor_scale = self.act_quant_func(input_tensor) - torch.ops._C.cutlass_scaled_mm( - output_tensor, - input_tensor_quant, - self.weight.t(), - input_tensor_scale, - self.weight_scale.float(), - self.bias, - ) - return output_tensor.unsqueeze(0) - - @classmethod - @torch.no_grad() - def new(cls, module): - in_features = module.in_features - out_features = module.out_features - bias = module.bias is not None - new_module = cls(in_features, out_features, bias) - return new_module - - -class VllmQuantLinearFp8(nn.Module): - def __init__(self, in_features, out_features, bias=True): - super().__init__() - self.in_features = in_features - self.out_features = out_features - self.register_buffer('weight', torch.empty((out_features, in_features), dtype=torch.float8_e4m3fn)) # noqa - self.register_buffer('weight_scale', torch.empty((out_features, 1), dtype=torch.float32)) - if bias: - self.register_buffer('bias', torch.empty(out_features, dtype=torch.bfloat16)) - else: - self.register_buffer('bias', None) - - def act_quant_func(self, x): - input_tensor_quant, input_tensor_scale \ - = ops.scaled_fp8_quant(x, None, scale_ub=None, use_per_token_if_dynamic=True) - return input_tensor_quant, input_tensor_scale - - def forward(self, input_tensor): - input_tensor = input_tensor.squeeze(0) - shape = (input_tensor.shape[0], self.weight.shape[0]) - dtype = input_tensor.dtype - device = input_tensor.device - output_tensor = torch.empty(shape, dtype=dtype, device=device, requires_grad=False) - input_tensor_quant, input_tensor_scale = self.act_quant_func(input_tensor) - torch.ops._C.cutlass_scaled_mm( - output_tensor, - input_tensor_quant, - self.weight.t(), - input_tensor_scale, - self.weight_scale.float(), - self.bias, - ) - - return output_tensor.unsqueeze(0) - - @classmethod - @torch.no_grad() - def new(cls, module): - in_features = module.in_features - out_features = module.out_features - bias = module.bias is not None - new_module = cls(in_features, out_features, bias) - return 
new_module - - -class LlmcFp8Linear(nn.Module): - def __init__(self, in_features, out_features, bias, block_size): - super().__init__() - self.block_size = block_size - self.in_features = in_features - self.out_features = out_features - if bias: - self.bias = nn.Parameter(torch.empty(out_features)) - else: - self.register_parameter('bias', None) - - # Init empty weight and scale - self.weight = nn.Parameter( - torch.empty(out_features, in_features, dtype=torch.float8_e4m3fn) - ) - scale_out_features = (out_features + block_size - 1) // block_size - scale_in_features = (in_features + block_size - 1) // block_size - self.weight_scale_inv = nn.Parameter( - torch.empty(scale_out_features, scale_in_features, dtype=torch.float32) - ) - - def forward(self, x): - if self.weight.data.dtype == torch.float8_e4m3fn: - if USE_FP8GEMM_TRITON_KERNEL: - y = block_wise_fp8_forward_func( - x, - self.weight, - self.weight_scale_inv, - self.block_size, - self.bias - ) - return y - else: - self.weight.data \ - = weight_cast_to_bf16(self.weight.data, - self.weight_scale_inv.data, - self.block_size).to(torch.bfloat16) - y = torch.functional.F.linear(x, self.weight, self.bias) - return y - - @classmethod - @torch.no_grad() - def new(cls, module, block_size): - in_features = module.in_features - out_features = module.out_features - bias = module.bias is not None - new_module = cls(in_features, out_features, bias, block_size) - return new_module - - def __repr__(self): - return ( - 'LlmcFp8Linear(' - + f'in_features={self.in_features}, ' - + f'out_features={self.out_features}, ' - + f'bias={self.bias is not None}, ' - + f'weight_shape={self.weight.shape}, ' - + f'weight_dtype={self.weight.dtype}, ' - + f'block_size={self.block_size}, ' - # + f"scales_shape={self.weight_scale_inv.shape}, " - # + f"scales_dtype={self.weight_scale_inv.dtype}, " - + f'use_fp8gemm_triton_kernel={USE_FP8GEMM_TRITON_KERNEL})' - ) - - -class LlmcActFn(nn.Module): - def __init__(self, module, a_qdq) -> None: - super().__init__() - self.act_fn = module - self.a_qdq = a_qdq - self.calib = True - - def forward(self, x): - if self.a_qdq is not None and not self.calib: - x = self.a_qdq(x, self) - x = self.act_fn(x) - return x - - @classmethod - @torch.no_grad() - def new(cls, module, a_qdq): - new_module = cls(module, a_qdq) - return new_module - - def disable_calib(self): - self.calib = False - - def __repr__(self): - return f'LlmcActFn(calib={self.calib})' - - -class RectifiedSigmoid(nn.Module): - def __init__(self, gamma, zeta): - super(RectifiedSigmoid, self).__init__() - self.gamma = gamma - self.zeta = zeta - - def forward(self, x): - return torch.clamp( - torch.sigmoid(x) * (self.zeta - self.gamma) + self.gamma, 0, 1 - ) - - def inverse(self, y): - """Return x that satisfies y = RectifiedSigmoid(x)""" - return -torch.log((self.zeta - self.gamma) / (y - self.gamma) - 1) - - -class LlmcLayerNorm(nn.Module): - def __init__(self, weight, bias, eps, normalized_shape, elementwise_affine): - super().__init__() - self.register_buffer('weight', weight) - if bias is not None: - self.register_buffer('bias', bias) - else: - self.bias = None - self.eps = eps - self.norm_func = nn.functional.layer_norm - self.normalized_shape = normalized_shape - self.elementwise_affine = elementwise_affine - self.use_tmp_parameter = False - - def forward(self, x): - if self.use_tmp_parameter: - weight = self.tmp_weight - bias = self.tmp_bias - else: - weight = self.weight - bias = self.bias - out = self.norm_func(x, self.normalized_shape, weight, bias, eps=self.eps) - 
return out - - @classmethod - @torch.no_grad() - def new(cls, module): - weight = module.weight.data - if module.bias is not None: - bias = module.bias.data - else: - bias = None - eps = module.eps - normalized_shape = module.normalized_shape - elementwise_affine = module.elementwise_affine - - new_module = cls(weight, bias, eps, normalized_shape, elementwise_affine) - - return new_module - - def __repr__(self): - return ( - f'LlmcLayerNorm({self.normalized_shape},' - f'eps={self.eps},' - f'elementwise_affine={self.elementwise_affine})' - ) - - -class LlmcLlamaRMSNorm(nn.Module): - def __init__(self, weight, eps=1e-6): - super().__init__() - self.register_buffer('weight', weight) - self.bias = None - self.variance_epsilon = eps - self.use_tmp_parameter = False - - def forward(self, hidden_states): - input_dtype = hidden_states.dtype - variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) - hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) - if self.use_tmp_parameter: - weight = self.tmp_weight - bias = self.tmp_bias if hasattr(self, 'tmp_bias') else None - else: - weight = self.weight - bias = self.bias if hasattr(self, 'bias') else None - - return ( - (weight * hidden_states + bias).to(input_dtype) - if bias is not None - else (weight * hidden_states).to(input_dtype) - ) - - @classmethod - @torch.no_grad() - def new(cls, module): - weight = module.weight.data - eps = module.variance_epsilon - new_module = cls(weight, eps) - return new_module - - def __repr__(self): - return 'LlmcLlamaRMSNorm()' - - -class LlmcRMSNorm(nn.Module): - def __init__(self, weight, eps=1e-6): - super().__init__() - self.variance_epsilon = eps - self.weight = nn.Parameter(torch.ones_like(weight)) - - def forward(self, hidden_states): - input_dtype = hidden_states.dtype - variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) - hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) - return hidden_states.to(input_dtype) - - @classmethod - @torch.no_grad() - def new(cls, module): - if hasattr(module, 'eps'): - eps = module.eps - else: - eps = module.variance_epsilon - weight = module.weight - new_module = cls(weight, eps) - return new_module - - def __repr__(self): - return 'LlmcRMSNorm()' - - -class LlmcQwen2RMSNorm(LlmcLlamaRMSNorm): - def __init__(self, weight, eps=1e-6): - super().__init__(weight, eps) - - def __repr__(self): - return 'LlmcQwen2RMSNorm()' - - -class LlmcMixtralRMSNorm(LlmcLlamaRMSNorm): - def __init__(self, weight, eps=1e-6): - super().__init__(weight, eps) - - def __repr__(self): - return 'LlmcMixtralRMSNorm()' - - -class LlmcMistralRMSNorm(LlmcLlamaRMSNorm): - def __init__(self, weight, eps=1e-6): - super().__init__(weight, eps) - - def __repr__(self): - return 'LlmcMistralRMSNorm()' - - -class LlmcInternLM2RMSNorm(LlmcLlamaRMSNorm): - def __init__(self, weight, eps=1e-6): - super().__init__(weight, eps) - - def __repr__(self): - return 'LlmcInternLM2RMSNorm()' - - -class LlmcGemma2RMSNorm(LlmcLlamaRMSNorm): - def __init__(self, weight, eps=1e-6): - super().__init__(weight, eps) - - def __repr__(self): - return 'LlmcGemma2RMSNorm()' - - -class LlmcMiniCPMRMSNorm(LlmcLlamaRMSNorm): - def __init__(self, weight, eps=1e-6): - super().__init__(weight, eps) - - def __repr__(self): - return 'LlmcMiniCPMRMSNorm()' - - -class OriginFloatLinear(nn.Module): - def __init__(self, weight, bias, ori_module): - super().__init__() - self.register_buffer('weight', weight) - if bias is not None: - self.register_buffer('bias', 
bias) - else: - self.bias = None - - for name, buf in ori_module.named_buffers(): - if name.startswith('buf_'): - self.register_buffer(name, buf.data) - if hasattr(self, 'buf_rotate') and self.buf_rotate: - self.rotater = ori_module.rotater - else: - self.buf_rotate = False - - if self.weight.data.dtype == torch.float8_e4m3fn: - self.fp8_forward = True - self.weight_scale_inv = ori_module.weight_scale_inv - self.block_size = ori_module.block_size - else: - self.fp8_forward = False - - @torch.no_grad() - def forward(self, x): - if hasattr(self, 'buf_rotate') and self.buf_rotate: - x = self.rotater.rotate(x) - if self.fp8_forward: - y = block_wise_fp8_forward_func( - x, self.weight, self.weight_scale_inv, self.block_size, self.bias - ) - else: - y = torch.functional.F.linear(x, self.weight, self.bias) - return y - - @classmethod - @torch.no_grad() - def new(cls, module): - if isinstance(module, nn.Linear): - return module - - weight = module.weight.data - if module.bias is not None: - bias = module.bias.data - else: - bias = None - - new_module = cls(weight, bias, module) - - new_module.in_features = module.in_features - new_module.out_features = module.out_features - return new_module - - def __repr__(self): - return ( - f'OriginFloatLinear(in_features={self.in_features},' - f'out_features={self.out_features},' - f'online_rotate={self.buf_rotate},' - f'fp8_forward={self.fp8_forward},' - f'bias={self.bias is not None})' - ) - - -class Rotater: - def __init__( - self, online_full_had, online_partial_had, fp32_had, K, had_K=None, had_dim=None - ): - self.online_full_had = online_full_had - self.online_partial_had = online_partial_had - self.fp32_had = fp32_had - self.K = K - self.had_K = had_K - self.had_dim = had_dim - - def rotate(self, x): - x_dtype = x.dtype - - if self.online_full_had: - if self.fp32_had: - x = matmul_hadU_cuda(x.float(), self.had_K, self.K).to(x_dtype) - else: - x = matmul_hadU_cuda(x, self.had_K, self.K) - - elif self.online_partial_had: - if self.fp32_had: - x = x.float() - init_shape = x.shape - if self.K == 1: - x = fast_hadamard_transform.hadamard_transform( - x.reshape( - -1, init_shape[-1] // self.had_dim, self.had_dim - ).transpose(1, 2), - scale=1 / math.sqrt(init_shape[-1] // self.had_dim), - ).transpose(1, 2) - else: - self.had_K = self.had_K.to(x.device) - - x = ( - self.had_K.to(x.dtype) - @ x.reshape(-1, init_shape[-1] // self.had_dim, self.had_dim) - ) / math.sqrt(init_shape[-1] // self.had_dim) - - if self.fp32_had: - x = x.to(x_dtype) - x = x.reshape(init_shape) - - return x - - -class RotateLinear(nn.Module): - def __init__( - self, - weight, - bias, - ori_module, - online_full_had, - online_partial_had, - fp32_had, - K, - had_K, - had_dim, - ): - super().__init__() - self.register_buffer('weight', weight) - if bias is not None: - self.register_buffer('bias', bias) - else: - self.bias = None - - for name, buf in ori_module.named_buffers(): - if name.startswith('buf_'): - self.register_buffer(name, buf.data) - - self.rotater = Rotater( - online_full_had, online_partial_had, fp32_had, K, had_K, had_dim - ) - self.register_buffer('buf_rotate', torch.tensor(True)) - - def forward(self, x): - x = self.rotater.rotate(x) - x = torch.functional.F.linear(x, self.weight, self.bias) - - return x - - @classmethod - @torch.no_grad() - def new( - cls, module, online_full_had, online_partial_had, fp32_had, K, had_K, had_dim - ): - weight = module.weight.data - if module.bias is not None: - bias = module.bias.data - else: - bias = None - - new_module = cls( - weight, - 
bias, - ori_module=module, - online_full_had=online_full_had, - online_partial_had=online_partial_had, - fp32_had=fp32_had, - K=K, - had_K=had_K, - had_dim=had_dim, - ) - - new_module.in_features = module.in_features - new_module.out_features = module.out_features - return new_module - - @classmethod - def get_func_name(cls, any_callable): - if isinstance(any_callable, partial): - return any_callable.func.__name__ - return any_callable.__name__ - - def register_activation_parameters(self, named_parameters): - pass - - def __repr__(self): - return ( - f'RotateLinear(in_features={self.in_features},' - f'out_features={self.out_features},' - f'bias={self.bias is not None},' - f'online_rotate={self.buf_rotate})' - ) - - -class FakeQuantLinear(nn.Module): - def __init__(self, weight, bias, ori_module, w_qdq, a_qdq): - super().__init__() - self.register_buffer('weight', weight) - if bias is not None: - self.register_buffer('bias', bias) - else: - self.bias = None - self.a_qdq = a_qdq - self.w_qdq = w_qdq - - for name, buf in ori_module.named_buffers(): - if name.startswith('buf_'): - self.register_buffer(name, buf.data) - for name, buf in ori_module.named_parameters(): - if name.startswith('buf_'): - self.register_buffer(name, buf.data) - - if hasattr(self, 'buf_rotate') and self.buf_rotate: - self.rotater = ori_module.rotater - else: - self.buf_rotate = False - - if self.weight.data.dtype == torch.float8_e4m3fn: - self.fp8_forward = True - self.weight_scale_inv = ori_module.weight_scale_inv - self.block_size = ori_module.block_size - else: - self.fp8_forward = False - - self.dynamic_quant_weight = False - self.dynamic_quant_tmp_weight = False - - def forward(self, x): - if hasattr(self, 'buf_rotate') and self.buf_rotate: - x = self.rotater.rotate(x) - - if self.a_qdq is not None: - x = self.a_qdq(x, self) - - if not hasattr(self, 'tmp_weight'): - tmp_weight = self.w_qdq(self) - self.register_buffer('tmp_weight', tmp_weight, persistent=False) - self.tmp_bias = self.bias - - elif self.dynamic_quant_weight: - self.tmp_weight = self.w_qdq(self) - self.tmp_bias = self.bias - - elif self.dynamic_quant_tmp_weight: - self.tmp_weight = self.w_qdq(self) - - if self.fp8_forward: - y = block_wise_fp8_forward_func( - x, self.weight, self.weight_scale_inv, self.block_size, self.bias - ) - else: - y = torch.functional.F.linear(x, self.tmp_weight, self.tmp_bias) - return y - - @classmethod - @torch.no_grad() - def new(cls, module, w_qdq, a_qdq): - weight = module.weight.data - if hasattr(module, 'bias') and module.bias is not None: - bias = module.bias.data - else: - bias = None - - new_module = cls(weight, bias, ori_module=module, w_qdq=w_qdq, a_qdq=a_qdq) - - new_module.in_features = module.in_features - new_module.out_features = module.out_features - new_module.w_qdq_name = cls.get_func_name(w_qdq) - new_module.a_qdq_name = ( - cls.get_func_name(a_qdq) if a_qdq is not None else 'None' - ) - return new_module - - @classmethod - def get_func_name(cls, any_callable): - if isinstance(any_callable, partial): - return any_callable.func.__name__ - return any_callable.__name__ - - def __repr__(self): - return ( - f'FakeQuantLinear(in_features={self.in_features},' - f'out_features={self.out_features}, bias={self.bias is not None},' - f'weight_quant={self.w_qdq_name},' - f'act_quant={self.a_qdq_name},' - f'online_rotate={self.buf_rotate})' - ) - - -class EffcientFakeQuantLinear(nn.Module): - def __init__(self, weight, bias, ori_module, a_qdq): - super().__init__() - self.register_buffer('weight', weight) - if bias is 
not None: - self.register_buffer('bias', bias) - else: - self.bias = None - self.a_qdq = a_qdq - - for name, buf in ori_module.named_buffers(): - if name.startswith('buf_'): - self.register_buffer(name, buf.data) - if hasattr(self, 'buf_rotate') and self.buf_rotate: - self.rotater = ori_module.rotater - else: - self.buf_rotate = False - - if self.weight.data.dtype == torch.float8_e4m3fn: - self.fp8_forward = True - self.weight_scale_inv = ori_module.weight_scale_inv - self.block_size = ori_module.block_size - else: - self.fp8_forward = False - - @torch.no_grad() - def forward(self, x): - if hasattr(self, 'buf_rotate') and self.buf_rotate: - x = self.rotater.rotate(x) - - if self.a_qdq is not None: - x = self.a_qdq(x, self) - - if self.fp8_forward: - y = block_wise_fp8_forward_func( - x, self.weight, self.weight_scale_inv, self.block_size, self.bias - ) - else: - y = torch.functional.F.linear(x, self.weight, self.bias) - return y - - @classmethod - @torch.no_grad() - def new(cls, module, w_qdq, a_qdq, debug_print={}): - weight = w_qdq(module) - - if module.bias is not None: - bias = module.bias.data - else: - bias = None - - new_module = cls(weight, bias, ori_module=module, a_qdq=a_qdq) - - new_module.in_features = module.in_features - new_module.out_features = module.out_features - new_module.w_qdq_name = cls.get_func_name(w_qdq) - new_module.a_qdq_name = ( - cls.get_func_name(a_qdq) if a_qdq is not None else 'None' - ) - new_module.debug_print = debug_print - return new_module - - @classmethod - def get_func_name(cls, any_callable): - if isinstance(any_callable, partial): - return any_callable.func.__name__ - return any_callable.__name__ - - def __repr__(self): - return ( - f'EffcientFakeQuantLinear(in_features={self.in_features},' - f'out_features={self.out_features},' - f'bias={self.bias is not None},' - f'weight_quant={self.w_qdq_name},' - f'act_quant={self.a_qdq_name},' - f'online_rotate={self.buf_rotate},' - f'fp8_forward={self.fp8_forward},' - f'debug_print={self.debug_print})' - ) - - -class VllmRealQuantLinear(nn.Module): - def __init__(self, weight, bias, scales, input_scale, need_pack, scales_name): - super().__init__() - weight_name = 'weight_packed' if need_pack else 'weight' - self.register_buffer(weight_name, weight) - - ( - self.register_buffer('bias', bias) - if bias is not None - else setattr(self, 'bias', None) - ) - - self.register_buffer(scales_name, scales) - self.register_buffer('input_scale', input_scale) - - @torch.no_grad() - def forward(self, x): - raise NotImplementedError - - @classmethod - @torch.no_grad() - def new(cls, module, w_q, quant_config): - weight, scales = cls.quant_pack(module, w_q, quant_config) - if hasattr(module, 'buf_act_scales_0'): - input_scale = module.buf_act_scales_0 - else: - input_scale = None - if ( - 'act' in quant_config - and quant_config.act.get('static', False) - and quant_config.get('quant_type', 'int-quant') == 'int-quant' - ): - input_scale = input_scale.unsqueeze(0) - - if module.bias is not None: - bias = module.bias.data - else: - bias = None - - need_pack = quant_config['weight'].get('need_pack', False) - - if quant_config['weight']['granularity'] == 'per_block': - scales_name = 'weight_scale_inv' - else: - scales_name = 'weight_scale' - - new_module = cls(weight, bias, scales, input_scale, need_pack, scales_name) - new_module.in_features = module.in_features - new_module.out_features = module.out_features - new_module.weight_shape = weight.shape - new_module.weight_dtype = weight.dtype - new_module.scales_shape = 
scales.shape - new_module.scales_dtype = scales.dtype - - new_module.zeros_shape = None - new_module.zeros_dtype = None - - return new_module - - @classmethod - @torch.no_grad() - def quant_pack(cls, module, w_q, quant_config): - if module.weight.data.dtype == torch.float8_e4m3fn: - module.weight.data = weight_cast_to_bf16( - module.weight.data, - module.weight_scale_inv.data, - module.block_size - ).to(torch.bfloat16) - weight, scales, zeros = w_q(module) - need_pack = quant_config['weight'].get('need_pack', False) - if need_pack: - weight, scales = cls.pack(weight, scales, quant_config) - return weight, scales - - @classmethod - @torch.no_grad() - def pack(self, weight, scales, quant_config): - - # Packs a tensor of quantized weights stored in int8 into int32s with padding - num_bits = quant_config['weight']['bit'] - - # convert to unsigned for packing - offset = pow(2, num_bits) // 2 - weight = (weight + offset).to(torch.uint8) - weight = weight.cpu().numpy().astype(np.uint32) - pack_factor = 32 // num_bits - - # pad input tensor and initialize packed output - packed_size = math.ceil(weight.shape[1] / pack_factor) - packed = np.zeros((weight.shape[0], packed_size), dtype=np.uint32) - padding = packed.shape[1] * pack_factor - weight.shape[1] - weight = np.pad(weight, pad_width=[(0, 0), (0, padding)], constant_values=0) - - # pack values - for i in range(pack_factor): - packed |= weight[:, i::pack_factor] << num_bits * i - - packed = np.ascontiguousarray(packed).view(np.int32) - int_weight = torch.from_numpy(packed).cuda() - del weight, packed - return int_weight, scales.to(torch.float16) - - def __repr__(self): - return ( - 'VllmRealQuantLinear(' - + f'in_features={self.in_features}, ' - + f'out_features={self.out_features}, ' - + f'bias={self.bias is not None}, ' - + f'weight_shape={self.weight_shape}, ' - + f'weight_dtype={self.weight_dtype}, ' - + f'scales_shape={self.scales_shape}, ' - + f'scales_dtype={self.scales_dtype}, ' - + f'zeros_shape={self.zeros_shape}, ' - + f'zeros_dtype={self.zeros_dtype})' - ) - - -class LightllmRealQuantLinear(VllmRealQuantLinear): - def __init__(self, weight, bias, scales, input_scale, need_pack, scales_name): - super().__init__(weight, bias, scales, input_scale, need_pack, scales_name) - - def __repr__(self): - return ( - 'LightllmRealQuantLinear(' - + f'in_features={self.in_features}, ' - + f'out_features={self.out_features}, ' - + f'bias={self.bias is not None}, ' - + f'weight_shape={self.weight_shape}, ' - + f'weight_dtype={self.weight_dtype}, ' - + f'scales_shape={self.scales_shape}, ' - + f'scales_dtype={self.scales_dtype}, ' - + f'zeros_shape={self.zeros_shape}, ' - + f'zeros_dtype={self.zeros_dtype})' - ) - - -class Lightx2vRealQuantLinear(VllmRealQuantLinear): - def __init__(self, weight, bias, scales, input_scale, need_pack, scales_name): - super().__init__(weight, bias, scales, input_scale, need_pack, scales_name) - - def __repr__(self): - return ( - 'Lightx2vRealQuantLinear(' - + f'in_features={self.in_features}, ' - + f'out_features={self.out_features}, ' - + f'bias={self.bias is not None}, ' - + f'weight_shape={self.weight_shape}, ' - + f'weight_dtype={self.weight_dtype}, ' - + f'scales_shape={self.scales_shape}, ' - + f'scales_dtype={self.scales_dtype}, ' - + f'zeros_shape={self.zeros_shape}, ' - + f'zeros_dtype={self.zeros_dtype})' - ) - - -class SglRealQuantLinear(VllmRealQuantLinear): - def __init__(self, weight, bias, scales, input_scale, need_pack, scales_name): - super().__init__(weight, bias, scales, input_scale, need_pack, 
scales_name) - - def __repr__(self): - return ( - 'SglRealQuantLinear(' - + f'in_features={self.in_features}, ' - + f'out_features={self.out_features}, ' - + f'bias={self.bias is not None}, ' - + f'weight_shape={self.weight_shape}, ' - + f'weight_dtype={self.weight_dtype}, ' - + f'scales_shape={self.scales_shape}, ' - + f'scales_dtype={self.scales_dtype}, ' - + f'zeros_shape={self.zeros_shape}, ' - + f'zeros_dtype={self.zeros_dtype})' - ) - - -class AutoawqRealQuantLinear(nn.Module): - def __init__(self, weight, bias, scales, zeros): - super().__init__() - self.register_buffer('qweight', weight) - - ( - self.register_buffer('bias', bias) - if bias is not None - else setattr(self, 'bias', None) - ) - - self.register_buffer('scales', scales) - - ( - self.register_buffer('qzeros', zeros) - if zeros is not None - else setattr(self, 'qzeros', None) - ) - - @torch.no_grad() - def forward(self, x): - raise NotImplementedError - - @classmethod - @torch.no_grad() - def new(cls, module, w_q, quant_config): - weight, scales, zeros = cls.quant_pack(module, w_q, quant_config) - if module.bias is not None: - bias = module.bias.data - else: - bias = None - - new_module = cls(weight, bias, scales, zeros) - new_module.in_features = module.in_features - new_module.out_features = module.out_features - new_module.weight_shape = weight.shape - new_module.weight_dtype = weight.dtype - new_module.scales_shape = scales.shape - new_module.scales_dtype = scales.dtype - - if zeros is not None: - new_module.zeros_shape = zeros.shape - new_module.zeros_dtype = zeros.dtype - else: - new_module.zeros_shape = None - new_module.zeros_dtype = None - - return new_module - - @classmethod - @torch.no_grad() - def quant_pack(cls, module, w_q, quant_config): - if module.weight.data.dtype == torch.float8_e4m3fn: - module.weight.data = weight_cast_to_bf16( - module.weight.data, - module.weight_scale_inv.data, - module.block_size - ).to(torch.bfloat16) - _, scales, zeros = w_q(module) - pack_version = quant_config['weight']['pack_version'] - if pack_version == 'gemm_pack': - int_weight, scales, int_zeros = cls.gemm_pack( - module, module.weight.data, scales, zeros, quant_config - ) - else: - raise NotImplementedError(f'Not support {pack_version}.') - return int_weight, scales, int_zeros - - @classmethod - @torch.no_grad() - def gemm_pack(self, module, weight, scales, zeros, quant_config): - - assert scales is not None and zeros is not None - scales = scales.t().contiguous().to(torch.float16) - zeros = zeros.t().contiguous() - - scale_zeros = zeros * scales - - bit = quant_config['weight']['bit'] - pack_num = 32 // bit - group_size = quant_config['weight']['group_size'] - - intweight = [] - - awq_linear_in_features = module.in_features - - for idx in range(awq_linear_in_features): - intweight.append( - torch.round( - (weight.data[:, idx] + scale_zeros[idx // group_size]) - / scales[idx // group_size] - ).to(torch.int)[:, None] - ) - intweight = torch.cat(intweight, dim=1) - intweight = intweight.t().contiguous() - intweight = intweight.to(dtype=torch.int32) - intweight = intweight.cuda() - - qweight = torch.zeros( - (intweight.shape[0], intweight.shape[1] // 32 * bit), - dtype=torch.int32, - device=intweight.device, - ) - for col in range(intweight.shape[1] // pack_num): - if bit == 4: - order_map = [0, 2, 4, 6, 1, 3, 5, 7] - else: - raise NotImplementedError('Only 4-bit are supported for now.') - for i in range(pack_num): - qweight_col = intweight[:, col * pack_num + order_map[i]] - qweight[:, col] |= qweight_col << (i * bit) - - 
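# Illustrative sketch (not part of the original file): how eight unsigned 4-bit values
# pack into one int32 entry with the AWQ GEMM order map used above. The input row is an
# assumption for demonstration only.
import numpy as np

def _pack_int4_row(vals):
    # vals: eight integers already offset into the unsigned range [0, 15].
    order_map = [0, 2, 4, 6, 1, 3, 5, 7]
    packed = np.uint32(0)
    for i, src in enumerate(order_map):
        packed |= np.uint32(vals[src]) << np.uint32(4 * i)
    return packed

print(hex(_pack_int4_row([1, 2, 3, 4, 5, 6, 7, 8])))  # -> 0x86427531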
zeros = zeros.to(dtype=torch.int32, device='cuda') - qzeros = torch.zeros( - (zeros.shape[0], zeros.shape[1] // 32 * bit), - dtype=torch.int32, - device=zeros.device, - ) - - for col in range(zeros.shape[1] // pack_num): - if bit == 4: - order_map = [0, 2, 4, 6, 1, 3, 5, 7] - else: - raise NotImplementedError('Only 4-bit are supported for now.') - for i in range(pack_num): - qzero_col = zeros[:, col * pack_num + order_map[i]] - qzeros[:, col] |= qzero_col << (i * bit) - - del weight - return qweight, scales, qzeros - - -class MlcllmRealQuantLinear(AutoawqRealQuantLinear): - def __init__(self, weight, bias, scales, zeros): - super().__init__(weight, bias, scales, zeros) - - def __repr__(self): - return ( - 'MlcllmRealQuantLinear(' - + f'in_features={self.in_features}, ' - + f'out_features={self.out_features}, ' - + f'bias={self.bias is not None}, ' - + f'weight_shape={self.weight_shape}, ' - + f'weight_dtype={self.weight_dtype}, ' - + f'scales_shape={self.scales_shape}, ' - + f'scales_dtype={self.scales_dtype}, ' - + f'zeros_shape={self.zeros_shape}, ' - + f'zeros_dtype={self.zeros_dtype})' - ) - - -_TRANSFORMERS_LN_TYPES_ = ALL_LAYERNORM_LAYERS -_TRANSFORMERS_LINEAR_TYPES_ = [nn.Linear] - -_MODEL_LN_TYPES_PAIRS_ = { - 'Llama': LlmcLlamaRMSNorm, - 'Llava': LlmcLlamaRMSNorm, - 'Mistral': LlmcMistralRMSNorm, - 'Mixtral': LlmcMixtralRMSNorm, - 'Interlm2': LlmcInternLM2RMSNorm, - 'Qwen2': LlmcQwen2RMSNorm, - 'Gemma2': LlmcGemma2RMSNorm, - 'MiniCPM': LlmcMiniCPMRMSNorm, - 'Starcoder': LlmcLayerNorm, - 'Opt': LlmcLayerNorm, - 'Bloom': LlmcLayerNorm, -} - - -_LLMC_LN_TYPES_ = [ - LlmcLayerNorm, - LlmcLlamaRMSNorm, - LlmcRMSNorm, - LlmcQwen2RMSNorm, - LlmcMistralRMSNorm, - LlmcMixtralRMSNorm, - LlmcInternLM2RMSNorm, - LlmcGemma2RMSNorm, - LlmcMiniCPMRMSNorm, -] - - -_LLMC_LINEAR_TYPES_ = [ - LlmcFp8Linear, - OriginFloatLinear, - RotateLinear, - FakeQuantLinear, - EffcientFakeQuantLinear, - VllmRealQuantLinear, - SglRealQuantLinear, - AutoawqRealQuantLinear, - MlcllmRealQuantLinear, - LightllmRealQuantLinear, -] - -_REALQUANT_LINEAR_MAP_ = { - 'vllm_quant': VllmRealQuantLinear, - 'lightllm_quant': LightllmRealQuantLinear, - 'sgl_quant': SglRealQuantLinear, - 'autoawq_quant': AutoawqRealQuantLinear, - 'mlcllm_quant': MlcllmRealQuantLinear, - 'lightx2v_quant': Lightx2vRealQuantLinear, -} diff --git a/llmc/compression/quantization/ntweak.py b/llmc/compression/quantization/ntweak.py deleted file mode 100644 index b758bf2a8..000000000 --- a/llmc/compression/quantization/ntweak.py +++ /dev/null @@ -1,210 +0,0 @@ -import functools -import gc -import math -from contextlib import nullcontext -from math import inf - -import torch -import torch.nn as nn -from loguru import logger -from tqdm import tqdm - -from llmc.utils.registry_factory import ALGO_REGISTRY - -from .base_blockwise_quantization import BaseBlockwiseQuantization -from .module_utils import (_LLMC_LN_TYPES_, _MODEL_LN_TYPES_PAIRS_, - FakeQuantLinear) -from .train_utils import LossFunction, NativeScalerWithGradNormCount - - -@ALGO_REGISTRY -class NormTweaking(BaseBlockwiseQuantization): - def __init__(self, model, quant_config, input, padding_mask, config): - super().__init__(model, quant_config, input, padding_mask, config) - self.add_quant_config() - - model_type = self.config['model']['type'] - self.attention_mask = self.input['kwargs'][0].get('attention_mask') - self.position_ids = ( - self.input['kwargs'][0].get('position_ids') - if model_type in ['Llama', 'Mistral', 'Qwen2'] - else None - ) - - if self.deactive_amp: - self.batch_mask = 
self._repeat_attention_mask() - else: - self.batch_mask = ( - self._repeat_attention_mask().float() - if self.attention_mask is not None - else None - ) - - self.dev = torch.device('cuda') - self.model_dtype = next(self.model.model.parameters()).dtype - - def _repeat_attention_mask(self): - if self.attention_mask is not None: - return self.attention_mask.repeat( - self.input['data'][0].shape[0], 1, 1, 1 - ).cuda() - return None - - def add_quant_config(self): - self.prefix = self.model.block_name_prefix - self.loss_func = LossFunction(method='mse') - self.deactive_amp = self.quant_config['special']['deactive_amp'] - - if self.deactive_amp: - self.dtype = torch.float - self.traincast = nullcontext - else: - self.dtype = self.model_dtype - self.traincast = torch.cuda.amp.autocast - self.epochs = self.quant_config['special']['epochs'] - self.ntweak_lr = self.quant_config['special']['ntweak_lr'] - self.gamma = self.quant_config['special']['gamma'] - - def block_forward(self, block, input_data=None): - output = [] - - if input_data is None: - input_data = self.input['data'] - - for i in range(len(input_data)): - input_data[i] = input_data[i].to(device=next(block.parameters()).device) - if ( - 'attention_mask' in self.input['kwargs'][i] - and self.input['kwargs'][i]['attention_mask'] is not None - ): - self.input['kwargs'][i]['attention_mask'] = self.input['kwargs'][i][ - 'attention_mask' - ].cuda() - with torch.no_grad(): - with torch.cuda.amp.autocast(): - out = block(input_data[i], **self.input['kwargs'][i])[0] - output.append(out) - return output - - def get_original_out(self, block): - if self.block_idx == 0: - self.ori_out = self.block_forward(block) - else: - self.ori_out = self.block_forward(block, self.ori_out) - - def block_transform(self, block, input_feat, block_kwargs): - logger.info(f'Start transform the {self.block_idx}-th block') - - with torch.no_grad(): - block.float() - - for i in range(len(self.input['data'])): - self.input['data'][i] = self.input['data'][i].to(self.dtype) - - self.get_original_out(block) - self.register_tweak_parameters(block) - self.ntweak_train(block) - - self.apply_layer_norms(block) - - logger.info(f'End transform the {self.block_idx}-th block') - - def ntweak_train(self, block): - optimizer = torch.optim.Adam([{'params': self.get_tweak_parameters(block)}]) - self.adjust_learning_rate(optimizer) - - for param_group in optimizer.param_groups: - logger.info(param_group['lr']) - - loss_scaler = NativeScalerWithGradNormCount() - - for epochs in range(self.epochs): - loss_list = [] - norm_list = [] - - for i in range(len(self.input['data'])): - with self.traincast(): - if self.position_ids is not None: - quant_out = block( - self.input['data'][i], - attention_mask=self.batch_mask, - position_ids=self.position_ids, - )[0] - else: - quant_out = block( - self.input['data'][i], attention_mask=self.batch_mask - )[0] - - loss = self.loss_func(self.ori_out[i].to(self.dtype), quant_out) - - if not math.isfinite(loss.item()): - logger.info('Loss is NAN, stopping training') - - loss_list.append(loss.data) - optimizer.zero_grad() - norm = loss_scaler( - loss, optimizer, parameters=self.get_tweak_parameters(block) - ) - norm_list.append(norm.data) - - loss_mean = torch.stack(loss_list).mean() - norm_mean = torch.stack(norm_list).mean() - logger.info( - f'block {self.block_idx} iter {epochs}' - f'loss:{loss_mean} norm:{norm_mean}' - ) - - del optimizer - - def apply_layer_norms(self, block): - for n, m in block.named_modules(): - if isinstance(m, tuple(_LLMC_LN_TYPES_)): 
- m.weight = m.tmp_weight - del m.tmp_weight - if hasattr(m, 'bias') and m.bias is not None: - m.bias = m.tmp_bias - del m.tmp_bias - m.use_tmp_parameter = False - - def register_tweak_parameters(self, block): - self.model.replace_module_block( - FakeQuantLinear, - block, - self.block_idx, - self.get_replacement_params( - mode='fake_quant', w_only=self.w_only, name=None - ), - ) - - llmc_ln_module = _MODEL_LN_TYPES_PAIRS_[self.config['model']['type']] - self.model.replace_module_block(llmc_ln_module, block, self.block_idx, {}) - - for n, m in block.named_modules(): - if isinstance(m, tuple(_LLMC_LN_TYPES_)): - m.register_parameter('tmp_weight', nn.Parameter(m.weight)) - if hasattr(m, 'bias') and m.bias is not None: - m.register_parameter('tmp_bias', nn.Parameter(m.bias)) - m.use_tmp_parameter = True - - def get_tweak_parameters(self, block): - params = [] - for n, m in block.named_modules(): - if isinstance(m, tuple(_LLMC_LN_TYPES_)): - params.append(m.tmp_weight) - if hasattr(m, 'tmp_bias'): - params.append(m.tmp_bias) - return iter(params) - - def adjust_learning_rate(self, optimizer): - for param_group in optimizer.param_groups: - param_group['lr'] = self.ntweak_lr * ( - 1 + self.gamma * (self.block_idx / len(self.blocks)) - ) - - def deploy(self, quant_format): - super().deploy(quant_format) - self.model.convert_dtype(self.model_dtype) - - def save_model(self, path): - self.model.convert_dtype(self.model_dtype) - super().save_model(path) diff --git a/llmc/compression/quantization/omniq.py b/llmc/compression/quantization/omniq.py deleted file mode 100644 index e34c6ff5a..000000000 --- a/llmc/compression/quantization/omniq.py +++ /dev/null @@ -1,697 +0,0 @@ -import copy -import functools -import gc -import math -import os -import random -from contextlib import nullcontext -from math import inf - -import numpy as np -import torch -import torch.nn as nn -from loguru import logger -from tqdm import tqdm - -from llmc.utils.registry_factory import ALGO_REGISTRY - -from .base_blockwise_quantization import BaseBlockwiseQuantization -from .module_utils import (_LLMC_LINEAR_TYPES_, _LLMC_LN_TYPES_, - _MODEL_LN_TYPES_PAIRS_, _TRANSFORMERS_LINEAR_TYPES_, - FakeQuantLinear) -from .train_utils import (LossFunction, NativeScalerWithGradNormCount, - TruncateFunction) - - -@ALGO_REGISTRY -class OmniQuant(BaseBlockwiseQuantization): - def __init__(self, model, quant_config, input, padding_mask, config): - super().__init__(model, quant_config, input, padding_mask, config) - self.add_quant_config() - - model_type = self.config['model']['type'] - if ( - model_type not in ['Llama', 'Opt', 'Falcon', 'Mistral', 'Qwen2'] - and self.let - ): - raise ValueError('Only support for opt/llama/Llama-2/falcon/Mistral now') - - self.attention_mask = self.input['kwargs'][0].get('attention_mask') - self.position_ids = ( - self.input['kwargs'][0].get('position_ids') - if model_type in ['Llama', 'Mistral', 'Qwen2'] - else None - ) - - if self.deactive_amp: - self.batch_mask = self._repeat_attention_mask() - else: - self.batch_mask = ( - self._repeat_attention_mask().float() - if self.attention_mask is not None - else None - ) - - self.dev = torch.device('cuda') - self.model_dtype = next(self.model.model.parameters()).dtype - - def _repeat_attention_mask(self): - if self.attention_mask is not None: - return self.attention_mask.repeat( - self.input['data'][0].shape[0], 1, 1, 1 - ).cuda() - return None - - def add_quant_config(self): - config = self.quant_config['special'] - self.prefix = self.model.block_name_prefix - 
self.loss_func = LossFunction(method='mse') - self.deactive_amp = config['deactive_amp'] - self.wd = config['wd'] - self.dtype = torch.float if self.deactive_amp else torch.float16 - self.traincast = nullcontext if self.deactive_amp else torch.cuda.amp.autocast - self.epochs = config['epochs'] - self.aug_loss = config['aug_loss'] - self.lwc = config['lwc'] - self.search_clip_init = config.get('search_clip_init', False) - self.smooth_up_down = config.get('smooth_up_down', False) - - if self.smooth_up_down and self.config['model']['type'] == 'Llama': - self.model.pairs['down_proj'] = 'down' - - if self.search_clip_init: - self.clip_version = 'v2' - self.load_clip = config.get('load_clip', False) - if self.load_clip: - self.clip_path = config['clip_path'] - self.weight_clips = torch.load( - os.path.join(self.clip_path, 'clips.pth') - ) - if self.lwc: - self.lwc_lr = config['lwc_lr'] - - self.let = config['let'] - if self.let: - if self.config['model']['type'] == 'Falcon': - raise ValueError('Falcon not yet support let') - assert 'attn_lr' in config or 'let_lr' in config - self.let_lr = config['let_lr'] - - self.use_shift = config['use_shift'] - if self.use_shift and not self.model.has_bias(): - raise ValueError("Don't support no bias model use shift") - self.alpha = config['alpha'] - self.search_scale_init = config.get('search_scale_init', False) - if self.search_scale_init: - self.scale_path = config['scale_path'] - self.act_scales = { - k: v.to(torch.float32) - for k, v in torch.load( - os.path.join(self.scale_path, 'scales.pth') - ).items() - } - else: - self.act_scales = self.get_act_scale_shift(stat='scales') - self.act_shifts = ( - self.get_act_scale_shift(stat='shifts') if self.use_shift else False - ) - else: - self.use_shift = False - - if self.epochs > 0: - assert self.lwc or self.let - - def block_forward(self, block, input_data=None): - output = [] - - if input_data is None: - input_data = self.input['data'] - - for i in range(len(input_data)): - input_data[i] = input_data[i].to(device=next(block.parameters()).device) - if ( - 'attention_mask' in self.input['kwargs'][i] - and self.input['kwargs'][i]['attention_mask'] is not None - ): - self.input['kwargs'][i]['attention_mask'] = self.input['kwargs'][i][ - 'attention_mask' - ].cuda() - with torch.no_grad(): - with torch.cuda.amp.autocast(): - out = block(input_data[i], **self.input['kwargs'][i])[0] - output.append(out) - return output - - def get_original_out(self, block): - if self.block_idx == 0: - self.ori_out = self.block_forward(block) - if self.aug_loss: - self.ori_out2 = self.ori_out - else: - self.ori_out = self.block_forward(block, self.ori_out) - if self.aug_loss: - self.ori_out2 = self.block_forward(block) - - def block_transform(self, block, input_feat, block_kwargs): - logger.info(f'Start transform the {self.block_idx}-th block') - - with torch.no_grad(): - block.float() - - for i in range(len(self.input['data'])): - self.input['data'][i] = self.input['data'][i].to(self.dtype) - - self.get_original_out(block) - - self.register_omni_parameters(block, input_feat) - self.omni_train(block) - - if self.let: - subsets = self.model.get_subsets_in_block(block) - for index, subset in enumerate(subsets): - prev_op = subset['prev_op'] - layers_dict = subset['layers'] - self.subset_transform(block, layers_dict, prev_op) - - self.clear_tmp(block) - - logger.info(f'End transform the {self.block_idx}-th block') - - def omni_train(self, block): - params = [] - if self.lwc: - params.append({'params': self.get_lwc_parameters(block), 
'lr': self.lwc_lr}) - if self.let: - params.append({'params': self.get_let_parameters(block), 'lr': self.let_lr}) - - if params: - optimizer = torch.optim.AdamW(params, weight_decay=self.wd) - else: - return - - loss_scaler = NativeScalerWithGradNormCount() - - for epoch in range(self.epochs): - loss_list = [] - norm_list = [] - - for i in range(len(self.input['data'])): - with self.traincast(): - if self.let: - self.smooth_tmp_weight(block) - - if self.position_ids is not None: - quant_out = block( - self.input['data'][i], - attention_mask=self.batch_mask, - position_ids=self.position_ids, - )[0] - else: - quant_out = block( - self.input['data'][i], attention_mask=self.batch_mask - )[0] - - loss = self.loss_func(self.ori_out[i], quant_out) - if self.aug_loss: - loss += self.loss_func(self.ori_out2[i], quant_out) - - if not math.isfinite(loss.item()): - logger.info('Loss is NAN, stopping training') - - loss_list.append(loss.data) - optimizer.zero_grad() - norm = loss_scaler( - loss, optimizer, parameters=self.get_omni_parameters(block) - ) - norm_list.append(norm.data) - - loss_mean = torch.stack(loss_list).mean() - norm_mean = torch.stack(norm_list).mean() - logger.info( - f'block {self.block_idx} iter {epoch} loss:{loss_mean} norm:{norm_mean}' - ) - - del optimizer - - def subset_transform(self, block, layers_dict, prev_op): - layers = list(layers_dict.values()) - - if ( - isinstance( - prev_op[0], tuple(_LLMC_LINEAR_TYPES_ + _TRANSFORMERS_LINEAR_TYPES_) - ) - and prev_op[0].out_features != layers[0].in_features - ): - logger.info('Cannot apply scale. Do not transform this subset.') - return - - scale, shift = self.search_scale_shift_subset(block, layers_dict) - - if len(scale): - if len(shift) and shift[0] is not None: - self.apply_shift(shift[0], prev_op, layers) - scale = scale[0] - scale.data = self.truncate(scale) - self.apply_scale(scale, prev_op, layers) - else: - self.smooth_q_k_inplace(block) - - def search_scale_shift_subset(self, block, layers_dict): - scale = [] - shift = [] - for name, module in block.named_parameters(): - if name.endswith('scale'): - for n in layers_dict: - for key in self.model.pairs.keys(): - if key in n and self.model.pairs[key] in name: - scale.append(module) - if name.endswith('shift'): - for n in layers_dict: - for key in self.model.pairs.keys(): - if key in n and self.model.pairs[key] in name: - shift.append(module) - return scale, shift - - def register_omni_parameters(self, block, input_feat): - module = FakeQuantLinear - self.model.replace_module_block( - module, - block, - self.block_idx, - self.get_replacement_params( - mode='fake_quant', w_only=self.w_only, name=None - ), - ) - if self.lwc: - self.register_lwc_parameters(block, input_feat) - if self.let: - self.register_let_parameters(block) - - def register_lwc_parameters(self, block, input_feat, init_value=4.0): - for n, m in block.named_modules(): - if isinstance(m, FakeQuantLinear): - if self.search_clip_init: - low_param, up_param = self.get_clip_parameters(input_feat, n, m) - else: - if self.wquantizer.granularity == 'per_group': - dim = int( - m.weight.data.shape[0] - * math.ceil( - m.weight.data.shape[1] / self.wquantizer.group_size - ) - ) - else: - dim = m.weight.data.shape[0] - if self.wquantizer.sym: - low_param = None - else: - low_param = nn.Parameter( - torch.ones( - (dim, 1), - device=self.dev, - dtype=self.dtype, - ) - * init_value - ) - up_param = nn.Parameter( - torch.ones( - (dim, 1), - device=self.dev, - dtype=self.dtype, - ) - * init_value - ) - - 
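# A minimal sketch of what the learnable clipping ("LWC") bound factors
# registered below do: the per-channel min/max range is shrunk by
# sigmoid(bound_factor), and with the init value of 4.0 used above,
# sigmoid(4.0) ~ 0.982, so training starts from (nearly) the plain min/max
# range. The names and the 4-bit asymmetric setting here are illustrative,
# not taken from the file above.
import torch

w = torch.randn(8, 64)                               # one weight block, per output channel
up = torch.full((8, 1), 4.0, requires_grad=True)     # analogue of buf_upbound_factor
low = torch.full((8, 1), 4.0, requires_grad=True)    # analogue of buf_lowbound_factor

max_val = w.amax(dim=-1, keepdim=True) * torch.sigmoid(up)
min_val = w.amin(dim=-1, keepdim=True) * torch.sigmoid(low)

scale = (max_val - min_val).clamp(min=1e-5) / 15     # asymmetric 4-bit: qmax - qmin = 15
zero = (-torch.round(min_val / scale)).clamp(0, 15)
w_dq = ((torch.round(w / scale) + zero).clamp(0, 15) - zero) * scale
# (in training, a straight-through estimator stands in for the round gradient)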
m.register_parameter('buf_upbound_factor', up_param) - m.register_parameter('buf_lowbound_factor', low_param) - m.dynamic_quant_weight = True - - def register_let_parameters(self, block): - block.register_parameter( - 'qkt_smooth_scale', - nn.Parameter( - torch.ones( - block.self_attn.q_proj.out_features, - device=self.dev, - dtype=self.dtype, - ) - ), - ) - - llmc_ln_module = _MODEL_LN_TYPES_PAIRS_[self.config['model']['type']] - self.model.replace_module_block(llmc_ln_module, block, self.block_idx, {}) - - for n, m in block.named_modules(): - if isinstance(m, FakeQuantLinear): - for key in self.model.pairs.keys(): - if key in n: - scale, shift = self.get_weight_scale_shift(m, n) - if shift is not None: - block.register_parameter( - f'{self.model.pairs[key]}_smooth_shift', - nn.Parameter(shift), - ) - else: - block.register_buffer( - f'{self.model.pairs[key]}_smooth_shift', None - ) - block.register_parameter( - f'{self.model.pairs[key]}_smooth_scale', nn.Parameter(scale) - ) - - m.dynamic_quant_weight = False - m.dynamic_quant_tmp_weight = True - - def get_clip_parameters(self, input_feat, n, m): - if any([_ in n for _ in ['q_', 'k_', 'query', 'key', 'Wqkv']]): - up_param = None - low_param = None - return low_param, up_param - - if self.load_clip: - logger.info('Load Searched clip...') - logger.info(f'clip layer {n}') - layer_name = f'{self.model.block_name_prefix}.{self.block_idx}.{n}' - logger.info(layer_name) - up_factor = self.weight_clips[layer_name]['up_factor'].float().cuda() - - low_factor = self.weight_clips[layer_name]['low_factor'] - if low_factor is not None: - low_factor = low_factor.float().cuda() - - else: - logger.info('Search clip ...') - if len(input_feat[n]) != 1: - inputs = [torch.cat(input_feat[n])] - else: - inputs = input_feat[n] - - max_val, min_val = self.auto_clipper.auto_clip_layer( - self.block_idx, - n, - m.weight.data, - inputs, - n_sample_token=self.config.calib.get('seq_len', None), - ) - - up_factor, low_factor = self.auto_clipper.get_clip_factor(self.block_idx, - m, - min_val, - max_val, - n) - - up_param = nn.Parameter(up_factor) - low_param = nn.Parameter(low_factor) - - return low_param, up_param - - def get_layer_norms(self, block): - layer_norms = [] - for n, m in block.named_modules(): - if isinstance(m, tuple(_LLMC_LN_TYPES_)): - layer_norms.append(m) - return layer_norms - - def get_lwc_parameters(self, block): - params = [] - for n, m in block.named_parameters(): - if n.find('bound_factor') > -1: - params.append(m) - return iter(params) - - def get_let_parameters(self, block): - params = [] - template = 'smooth' if self.use_shift else 'smooth_scale' - for n, m in block.named_parameters(): - if n.find(template) > -1: - params.append(m) - return iter(params) - - def get_omni_parameters(self, block): - params = [] - template = 'smooth' if self.use_shift else 'smooth_scale' - for n, m in block.named_parameters(): - if n.find('bound_factor') > -1 or n.find(template) > -1: - params.append(m) - return iter(params) - - def get_act_scale_shift(self, stat='scales'): - self.model.model.eval() - - act_stat = {} - - def get_tensor_scale(name, tensor): - hidden_dim = tensor.shape[-1] - tensor = tensor.view(-1, hidden_dim).abs().detach() - comming_max = torch.max(tensor, dim=0)[0].float().cpu() - if name in act_stat: - act_stat[name] = torch.max(act_stat[name], comming_max) - else: - act_stat[name] = comming_max - - def get_tensor_shift(name, tensor): - hidden_dim = tensor.shape[-1] - tensor = tensor.view(-1, hidden_dim).detach() - comming_max = 
torch.max(tensor, dim=0)[0].float().cpu() - comming_min = torch.min(tensor, dim=0)[0].float().cpu() - if name in act_stat: - act_stat[name] = 0.99 * act_stat[name] + 0.01 * ( - (comming_max + comming_min) / 2 - ) - else: - act_stat[name] = (comming_max + comming_min) / 2 - - def stat_input_hook(m, x, y, name): - if isinstance(x, tuple): - x = x[0] - if stat == 'scales': - get_tensor_scale(name, x) - elif stat == 'shifts': - get_tensor_shift(name, x) - - hooks = [] - for name, m in self.model.model.named_modules(): - if isinstance(m, nn.Linear): - hooks.append( - m.register_forward_hook( - functools.partial(stat_input_hook, name=name) - ) - ) - - with torch.no_grad(): - for i in tqdm(range(len(self.blocks))): - block = self.blocks[i] - block.cuda() - if i == 0: - fp_inps = self.block_forward(block) - else: - fp_inps = self.block_forward(block, fp_inps) - - block.cpu() - - for h in hooks: - h.remove() - gc.collect() - torch.cuda.empty_cache() - - return act_stat - - def get_weight_scale_shift(self, layer, name): - if f'{self.prefix}.{self.block_idx}.{name}' not in self.act_scales: - act = None - else: - act = ( - self.act_scales[f'{self.prefix}.{self.block_idx}.{name}'] - .to( - device=self.dev, - dtype=self.dtype, - ) - .clamp(min=1e-5) - ) - - weight = layer.weight.data.max(dim=0)[0].clamp(min=1e-5) - - if act is not None: - scale = (act.pow(self.alpha) / weight.half().pow(1 - self.alpha)).clamp( - min=1e-5 - ) - - if self.use_shift: - shift = self.act_shifts[f'{self.prefix}.{self.block_idx}.{name}'].to( - device=self.dev, - dtype=self.dtype, - ) - else: - shift = None - - if self.search_scale_init: - return act, shift - else: - return scale, shift - - def truncate(self, num, threshold=1e-2): - return TruncateFunction.apply(num, threshold) - - def clear_let_parameters(self, block): - template = 'smooth' if self.use_shift else 'smooth_scale' - for n, _ in list(block.named_parameters()): - if template in n: - delattr(block, n) - - def clear_tmp(self, block): - for n, m in block.named_modules(): - if isinstance(m, FakeQuantLinear): - del m.tmp_weight - del m.tmp_bias - m.dynamic_quant_weight = False - m.dynamic_quant_tmp_weight = False - if self.lwc: - if m.buf_lowbound_factor is not None: - m.buf_upbound_factor.requires_grad = False - m.buf_lowbound_factor.requires_grad = False - - if self.let: - self.clear_let_parameters(block) - - def smooth_tmp_weight(self, block): - subsets = self.model.get_subsets_in_block(block) - with torch.no_grad(): - for n, m in block.named_parameters(): - if 'smooth_scale' in n: - m.data = self.truncate(m) - - layer_norms = self.get_layer_norms(block) - - qkv_layers = [subsets[0]['layers'][name] for name in subsets[0]['layers']] - - self.smooth_ln_fcs_tmp( - layer_norms[0], - qkv_layers, - block.qkv_smooth_scale, - block.qkv_smooth_shift, - ) - self.smooth_ln_fcs_tmp( - layer_norms[1], - [subsets[2]['layers'][name] for name in subsets[2]['layers']], - block.fc1_smooth_scale, - block.fc1_smooth_shift, - ) - self.smooth_fc_fc_tmp( - subsets[1]['prev_op'][0], - subsets[1]['inspect'], - block.out_smooth_scale, - block.out_smooth_shift, - ) - - if self.smooth_up_down: - self.smooth_fc_fc_tmp( - subsets[3]['prev_op'][0], - subsets[3]['inspect'], - block.down_smooth_scale, - None, - ) - - self.smooth_q_k_tmp(qkv_layers[0], qkv_layers[1], block.qkt_smooth_scale) - subsets[3]['inspect'].tmp_weight = subsets[3]['inspect'].weight - - for name, module in block.named_modules(): - if isinstance(module, FakeQuantLinear): - if not hasattr(module, 'tmp_bias'): - module.tmp_bias = 
module.bias - - def smooth_ln_fcs_tmp(self, ln, fcs, scales, shifts): - ln.use_tmp_parameter = True - if not isinstance(fcs, list): - fcs = [fcs] - - if shifts is not None: - if hasattr(ln, 'bias') and ln.bias is not None: - ln.tmp_bias = (ln.bias - shifts) / scales - else: - ln.tmp_bias = (-1 * shifts) / scales - - ln.tmp_weight = ln.weight / scales - - for fc in fcs: - if shifts is not None: - if hasattr(fc, 'bias') and fc.bias is not None: - fc.tmp_bias = fc.bias + fc.weight @ shifts - else: - fc.tmp_bias = fc.weight @ shifts - fc.tmp_weight = fc.weight * scales.view(1, -1) - - def smooth_fc_fc_tmp(self, fc1, fc2, scales, shifts): - if fc1.out_features != fc2.in_features: - fc1.tmp_weight = fc1.weight - fc2.tmp_weight = fc2.weight - return - - if hasattr(fc1, 'tmp_weight'): - if hasattr(fc1, 'tmp_bias') and fc1.tmp_bias is not None: - if shifts is not None: - fc1.tmp_bias = fc1.tmp_bias - shifts - fc1.tmp_bias = fc1.tmp_bias / scales.view(-1) - fc1.tmp_weight = fc1.tmp_weight / scales.view(-1, 1) - else: - if hasattr(fc1, 'bias') and fc1.bias is not None: - fc1.tmp_bias = fc1.bias / scales.view(-1) - fc1.tmp_weight = fc1.weight / scales.view(-1, 1) - - if shifts is not None: - if hasattr(fc2, 'bias') and fc2.bias is not None: - fc2.tmp_bias = fc2.bias + fc2.weight @ shifts - else: - fc2.tmp_bias = fc2.weight @ shifts - - fc2.tmp_weight = fc2.weight * scales.view(1, -1) - - def smooth_q_k_tmp(self, q_proj, k_proj, scales): - if q_proj.tmp_weight.shape != k_proj.tmp_weight.shape: - return - - q_proj.tmp_weight = q_proj.tmp_weight / scales.view(-1, 1) - if hasattr(q_proj, 'tmp_bias') and q_proj.tmp_bias is not None: - q_proj.tmp_bias = q_proj.tmp_bias / scales.view(-1) - k_proj.tmp_weight = k_proj.tmp_weight * scales.view(-1, 1) - if hasattr(k_proj, 'tmp_bias') and k_proj.tmp_bias is not None: - k_proj.tmp_bias = k_proj.tmp_bias * scales.view(-1) - - def smooth_q_k_inplace(self, block): - for name, module in block.named_modules(): - if isinstance(module, tuple(_LLMC_LN_TYPES_)): - module.use_tmp_parameter = False - - if block.self_attn.q_proj.weight.shape != block.self_attn.k_proj.weight.shape: - return - - scales = block.qkt_smooth_scale - scales.data = self.truncate(scales) - block.self_attn.q_proj.weight.div_(scales.view(-1, 1)) - if block.self_attn.q_proj.bias is not None: - block.self_attn.q_proj.bias.div_(scales.view(-1)) - block.self_attn.k_proj.weight.mul_(scales.view(-1, 1)) - if block.self_attn.k_proj.bias is not None: - block.self_attn.k_proj.bias.mul_(scales.view(-1)) - - def w_qdq(self, module, wquantizer): - args = {'lowbound_factor': None, 'upbound_factor': None} - if hasattr(module, 'buf_lowbound_factor'): - args['lowbound_factor'] = module.buf_lowbound_factor - if hasattr(module, 'buf_upbound_factor'): - args['upbound_factor'] = module.buf_upbound_factor - - if module.dynamic_quant_weight: - return wquantizer.fake_quant_weight_dynamic(module.weight, args) - - elif module.dynamic_quant_tmp_weight: - return wquantizer.fake_quant_weight_dynamic(module.tmp_weight, args) - else: - return wquantizer.fake_quant_weight_dynamic(module.weight, args) - - def deploy(self, quant_format): - super().deploy(quant_format) - self.model.convert_dtype(self.model_dtype) - - def save_model(self, path): - self.model.convert_dtype(self.model_dtype) - super().save_model(path) diff --git a/llmc/compression/quantization/osplus.py b/llmc/compression/quantization/osplus.py deleted file mode 100755 index d5efb1cb5..000000000 --- a/llmc/compression/quantization/osplus.py +++ /dev/null @@ -1,235 +0,0 @@ 
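# Both the LET smoothing above and the OS+ transform in the file below fold a
# per-channel scale (and optional shift) into a LayerNorm -> Linear pair, and
# the fold is output-preserving in floating point. The following minimal
# sketch checks that numerically; only the per-channel affine part of
# LayerNorm is modelled, and every name here is illustrative.
import torch

torch.manual_seed(0)
d, out = 8, 16
x = torch.randn(4, d)
ln_w, ln_b = torch.randn(d), torch.randn(d)
W, b = torch.randn(out, d), torch.randn(out)
s = torch.rand(d) + 0.5              # smoothing scale
shift = torch.randn(d)               # smoothing shift


def affine(v, w, bias):              # stand-in for the LN's affine step
    return v * w + bias


y_ref = affine(x, ln_w, ln_b) @ W.T + b

ln_w2, ln_b2 = ln_w / s, (ln_b - shift) / s          # mirrors smooth_ln_fcs_tmp
W2, b2 = W * s, b + W @ shift

y_new = affine(x, ln_w2, ln_b2) @ W2.T + b2
assert torch.allclose(y_ref, y_new, atol=1e-4)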
-import functools -import gc -from collections import defaultdict - -import torch -import torch.nn as nn -from loguru import logger - -from llmc.utils.registry_factory import ALGO_REGISTRY - -from .base_blockwise_quantization import BaseBlockwiseQuantization -from .module_utils import (_LLMC_LINEAR_TYPES_, _LLMC_LN_TYPES_, - _TRANSFORMERS_LINEAR_TYPES_, - _TRANSFORMERS_LN_TYPES_, FakeQuantLinear, - OriginFloatLinear) -from .utils import is_fp8_supported_gpu - -if is_fp8_supported_gpu(): - from .kernel import weight_cast_to_bf16, weight_cast_to_fp8 - logger.info('Successfully imported Triton kernel.') -else: - from .quant import weight_cast_to_bf16, weight_cast_to_fp8 - logger.info( - 'Triton kernel not available: non-Hopper GPU detected.\n' - 'Using LLMC Quantizer implementation instead.' - ) - - -@ALGO_REGISTRY -class OsPlus(BaseBlockwiseQuantization): - def __init__(self, model, quant_config, input, padding_mask, config): - torch.set_grad_enabled(False) - super().__init__(model, quant_config, input, padding_mask, config) - - @torch.no_grad() - def filter_subset(self, prev_op): - if isinstance(prev_op[0], tuple(_LLMC_LN_TYPES_ + _TRANSFORMERS_LN_TYPES_)): - return True - else: - return False - - @torch.no_grad() - def get_original_out(self, x, inspect_module, subset_kwargs): - with torch.no_grad(): - org_out = inspect_module(x, **subset_kwargs) - if isinstance(org_out, tuple): - org_out = org_out[0] - return org_out - - @torch.no_grad() - def search_scale_shift_subset( - self, layers, input_feats, inspect_module, subset_kwargs - ): - org_sd = {k: v.cpu() for k, v in inspect_module.state_dict().items()} - org_out_dict = {} - - for i in range(len(input_feats)): - input_feats[i] = input_feats[i].to(next(inspect_module.parameters()).device) - x = input_feats[i] - - if self.model.has_bias(): - if x.dim() == 3: - cmx = torch.amax(x, dim=(0, 1)) - cmn = torch.amin(x, dim=(0, 1)) - elif x.dim() == 2: - cmx = torch.amax(x, dim=0) - cmn = torch.amin(x, dim=0) - shift = (cmx + cmn) / 2 - else: - shift = None - - if isinstance(subset_kwargs, list): - kwargs = subset_kwargs[i] - else: - kwargs = subset_kwargs - - if len(input_feats) == 1: - org_out = self.get_original_out(x, inspect_module, kwargs) - else: - if i in org_out_dict: - org_out = org_out_dict[i] - else: - org_out = self.get_original_out(x, inspect_module, kwargs) - org_out_dict[i] = org_out - - if self.model.has_bias(): - x_shift = x - shift - else: - x_shift = x.clone() - - if x.dim() == 3: - cmx = torch.amax(x_shift, dim=(0, 1)) - cmn = torch.amin(x_shift, dim=(0, 1)) - elif x.dim() == 2: - cmx = torch.amax(x_shift, dim=0) - cmn = torch.amin(x_shift, dim=0) - amx = max( - x_shift.max(), torch.tensor(0.0, dtype=x_shift.dtype).to(x_shift.device) - ) - amn = min( - x_shift.min(), torch.tensor(0.0, dtype=x_shift.dtype).to(x_shift.device) - ) - - if torch.isnan(amx): - num = 100 - else: - num = max(100, int(amx / 0.5)) - - best_loss = None - bounds = (1.0, max(-amn.item(), amx.item())) - step = (bounds[1] - bounds[0]) / num - - best_min_range = -bounds[1] - best_max_range = bounds[1] - st = bounds[1] - cnt = 0 - while st >= bounds[0]: - min_range = torch.tensor(-st, dtype=x_shift.dtype).to(x_shift.device) - max_range = torch.tensor(st, dtype=x_shift.dtype).to(x_shift.device) - - mx_scale = torch.where( - cmx > max_range, - cmx / max_range, - torch.tensor(1.0, dtype=x_shift.dtype).to(x_shift.device), - ) - mn_scale = torch.where( - cmn < min_range, - cmn / min_range, - torch.tensor(1.0, dtype=x_shift.dtype).to(x_shift.device), - ) - cur_scale 
= torch.max(mx_scale, mn_scale) - - for fc in layers: - if self.model.has_bias(): - fc.bias.data += shift @ fc.weight.data.T - - if fc.weight.data.dtype == torch.float8_e4m3fn: - tmp_weight_data \ - = weight_cast_to_bf16(fc.weight.data, - fc.weight_scale_inv.data, - self.fp8_block_size).to(torch.bfloat16) - else: - tmp_weight_data = fc.weight.data - - tmp_weight_data.mul_(cur_scale.view(1, -1)) - tmp_weight_data = self.wquantizer.fake_quant_weight_dynamic( - tmp_weight_data - ) - - if fc.weight.data.dtype == torch.float8_e4m3fn: - fc.weight.data, fc.weight_scale_inv.data \ - = weight_cast_to_fp8(tmp_weight_data, self.fp8_block_size) - else: - fc.weight.data = tmp_weight_data - - x_shift_tmp = x_shift / cur_scale.view(1, -1) - q_x = self.aquantizer.fake_quant_act_dynamic(x_shift_tmp) - - out = inspect_module(q_x, **kwargs) - if isinstance(out, tuple): - out = out[0] - - loss = (org_out - out).pow(2).sum(-1).mean() - - if best_loss is None or best_loss > loss: - best_loss = loss - best_min_range = -st - best_max_range = st - cnt += 1 - st -= step - inspect_module.load_state_dict(org_sd) - - best_min_range = torch.tensor(best_min_range, dtype=x_shift.dtype).to( - x_shift.device - ) - best_max_range = torch.tensor(best_max_range, dtype=x_shift.dtype).to( - x_shift.device - ) - - mn_scale = torch.where( - cmn < best_min_range, - cmn / best_min_range, - torch.tensor(1.0, dtype=x_shift.dtype).to(x_shift.device), - ) - mx_scale = torch.where( - cmx > best_max_range, - cmx / best_max_range, - torch.tensor(1.0, dtype=x_shift.dtype).to(x_shift.device), - ) - - best_scale = torch.max(mx_scale, mn_scale) - - del org_out_dict - gc.collect() - torch.cuda.empty_cache() - return best_scale, shift - - @torch.no_grad() - def subset_transform( - self, - subset, - input_feat, - subset_kwargs, - ): - layers_dict = subset['layers'] - prev_op = subset['prev_op'] - input_name = subset['input'][0] - inspect_module = subset['inspect'] - - assert ( - len(prev_op) == 1 - ), 'Only support single prev_op. If multi prev_ops, code need to be updated.' - - layers = list(layers_dict.values()) - if ( - isinstance( - prev_op[0], tuple(_LLMC_LINEAR_TYPES_ + _TRANSFORMERS_LINEAR_TYPES_) - ) - and prev_op[0].out_features != layers[0].in_features * 3 - and prev_op[0].out_features != layers[0].in_features - ): - logger.info('Cannot apply scale. Do not transform this subset.') - return - - if not self.filter_subset(prev_op): - logger.info('Do not transform this subset.') - return - - scale, shift = self.search_scale_shift_subset( - layers, input_feat[input_name], inspect_module, subset_kwargs - ) - self.apply_shift(shift, prev_op, layers) - self.apply_scale(scale, prev_op, layers) - if self.act_static: - self.update_input_feat(scale, input_feat, layers_dict) diff --git a/llmc/compression/quantization/quant.py b/llmc/compression/quantization/quant.py deleted file mode 100755 index 2c24c03a8..000000000 --- a/llmc/compression/quantization/quant.py +++ /dev/null @@ -1,1369 +0,0 @@ -import gc - -import torch -from loguru import logger - -from .utils import ceil_div - -try: - from qtorch.quant import float_quantize -except Exception: - logger.warning( - 'qtorch not found, please install qtorch.' - 'Please install qtorch (pip install qtorch).' 
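# The OS+ search above boils down to: try symmetric clipping ranges [-st, st],
# turn whatever sticks out into a per-channel scale (channel_max / st, floored
# at 1.0), and keep the range whose fake-quantized output best matches the FP
# output. A hypothetical stripped-down version; `fp_out_fn` and `quant_fn` are
# illustrative callables (the real code folds the scale into the next linear
# layer's weight rather than scaling the activation back).
import torch


def osplus_scale_search(x, fp_out_fn, quant_fn, steps=100):
    cmx = x.amax(dim=tuple(range(x.dim() - 1)))      # per-channel max
    cmn = x.amin(dim=tuple(range(x.dim() - 1)))      # per-channel min
    ref = fp_out_fn(x)
    best, best_scale = None, torch.ones_like(cmx)
    hi = float(torch.max(cmx.max(), -cmn.min()))
    for st in torch.linspace(hi, 1.0, steps):
        scale = torch.maximum(cmx / st, cmn / -st).clamp(min=1.0)
        out = fp_out_fn(quant_fn(x / scale) * scale)
        loss = (ref - out).pow(2).sum(-1).mean()
        if best is None or loss < best:
            best, best_scale = loss, scale
    return best_scale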
- ) - float_quantize = None - - -def weight_cast_to_bf16(weight, scale, block_size): - quantizer = FloatQuantizer( - bit='e4m3', - symmetric=True, - granularity='per_block', - block_size=block_size, - use_qtorch=True, - ) - scale = scale.view(scale.shape[0], 1, scale.shape[1], 1) - org_shape = weight.shape - weight = quantizer.reshape_tensor(weight) - weight = quantizer.dequant(weight.float(), scale, 0) - weight = quantizer.restore_tensor(weight, org_shape) - return weight.to(torch.bfloat16) - - -def weight_cast_to_fp8(weight, block_size): - quantizer = FloatQuantizer( - bit='e4m3', - symmetric=True, - granularity='per_block', - block_size=block_size, - use_qtorch=True, - ) - fp8_weight, fp8_scale, _ = quantizer.real_quant_weight_dynamic(weight) - return fp8_weight, fp8_scale - - -class BaseQuantizer(object): - def __init__(self, bit, symmetric, granularity, **kwargs): - self.bit = bit - self.sym = symmetric - self.granularity = granularity - self.kwargs = kwargs - - self.calib_algo = self.kwargs.get('calib_algo', 'minmax') - - if self.granularity == 'per_group': - self.group_size = self.kwargs['group_size'] - elif self.granularity == 'per_head': - self.head_num = self.kwargs['head_num'] - elif self.granularity == 'per_block': - assert self.calib_algo == 'minmax' and self.sym - self.block_size = self.kwargs['block_size'] - - if self.kwargs.get('ste', False): - self.round_func = lambda x: (x.round() - x).detach() + x - else: - self.round_func = torch.round - if 'ste_all' in self.kwargs and self.kwargs['ste_all']: - self.round_func = torch.round - self.ste_all = True - else: - self.ste_all = False - - self.round_zp = self.kwargs.get('round_zp', True) - self.sigmoid = torch.nn.Sigmoid() - - # mse config - self.mse_b_num = self.kwargs.get('mse_b_num', 1) - self.maxshrink = self.kwargs.get('maxshrink', 0.8) - self.mse_grid = self.kwargs.get('mse_grid', 100) - - # hist config - self.bins = self.kwargs.get('bins', 2048) - self.upsample_rate = ( - 16 # used to reduce quantization errors when upscaling histogram - ) - - # hqq config - self.lp_norm = self.kwargs.get('lp_norm', 0.7) - self.beta = self.kwargs.get('beta', 10) - self.kappa = self.kwargs.get('kappa', 1.01) - self.iters = self.kwargs.get('iters', 20) - if self.lp_norm == 1: - self.shrink_op = lambda x, beta: torch.sign(x) * torch.nn.functional.relu( - torch.abs(x) - 1.0 / self.beta - ) - else: - self.shrink_op = lambda x, beta, p=self.lp_norm: torch.sign( - x - ) * torch.nn.functional.relu( - torch.abs(x) - (1.0 / self.beta) * torch.pow(torch.abs(x), p - 1) - ) - - def reshape_batch_tensors(self, act_tensors): - assert len(act_tensors) > 0, ( - 'Calibration data is insufficient. Please provide more data to ensure ' - 'all experts in the MOE receive an adequate number of tokens.' - ) - - if isinstance(act_tensors[0], tuple): - # Handle multiple inputs by stacking tensors. - unzipped_inputs = zip(*act_tensors) - act_tensors = [torch.stack(tensor_list) for tensor_list in unzipped_inputs] - else: - if len(act_tensors) == 1: - # Handle batch-size=-1 case. 
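# The weight_cast_to_fp8 / weight_cast_to_bf16 pair above is a block-wise
# e4m3 round trip: each block gets its own scale, the weight is stored as fp8
# plus scales, and is cast back to bf16 when a kernel needs it. A hypothetical
# per-tensor (single-block) version using the native dtype; requires a PyTorch
# build that ships torch.float8_e4m3fn.
import torch

w = torch.randn(256, 256, dtype=torch.bfloat16)
amax = w.abs().amax().float().clamp(min=1e-5)
scale = amax / torch.finfo(torch.float8_e4m3fn).max          # 448 for e4m3
w_fp8 = (w.float() / scale).to(torch.float8_e4m3fn)          # stored weight
w_bf16 = (w_fp8.float() * scale).to(torch.bfloat16)          # cast back on use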
- tensor_list = [act_tensors[0][i] for i in range(act_tensors[0].size(0))] - act_tensors[0] = tensor_list - else: - act_tensors = [act_tensors] - return act_tensors - - def get_tensor_range(self, tensor, args={}): - if self.calib_algo == 'minmax': - return self.get_minmax_range(tensor) - elif self.calib_algo == 'mse': - return self.get_mse_range(tensor) - elif self.calib_algo == 'learnable': - return self.get_learnable_range(tensor, **args) - else: - return self.get_minmax_range(tensor) - - def get_minmax_range(self, tensor): - if self.granularity == 'per_tensor': - max_val = torch.max(tensor) - min_val = torch.min(tensor) - elif self.granularity == 'per_block': - min_val = tensor.abs().float().amin(dim=(1, 3), keepdim=True) - max_val = tensor.abs().float().amax(dim=(1, 3), keepdim=True) - else: - max_val = tensor.amax(dim=-1, keepdim=True) - min_val = tensor.amin(dim=-1, keepdim=True) - - return (min_val, max_val) - - def get_mse_range(self, tensor, norm=2.4, bs=256): - - assert ( - self.mse_b_num >= 1 and tensor.shape[0] % self.mse_b_num == 0 - ), 'Batch number must be divisible by tensor.shape[0],' - bs = tensor.shape[0] // self.mse_b_num - tensor = tensor.float() - min_val, max_val = self.get_minmax_range(tensor) - - dev = tensor.device - - for b_num in range(self.mse_b_num): - _tensor = tensor[b_num * bs : (b_num + 1) * bs, :] - _min_val, _max_val = ( - min_val[b_num * bs : (b_num + 1) * bs, :], - max_val[b_num * bs : (b_num + 1) * bs, :], - ) - - best = torch.full([_tensor.shape[0]], float('inf'), device=dev) - - best_min_val, best_max_val = _min_val, _max_val - - for i in range(int(self.maxshrink * self.mse_grid)): - p = 1 - i / self.mse_grid - - xmin = p * _min_val - xmax = p * _max_val - - if self.quant_type == 'float-quant' and not self.use_qtorch: - clip_tensor, scales = self.get_float_qparams( - _tensor, (xmin, xmax), dev - ) - zeros, qmin, qmax = 0, None, None - q_tensor = self.quant_dequant( - clip_tensor, scales, zeros, qmax, qmin - ) - - else: - scales, zeros, qmax, qmin = self.get_qparams((xmin, xmax), dev) - q_tensor = self.quant_dequant(_tensor, scales, zeros, qmax, qmin) - - q_tensor -= _tensor - q_tensor.abs_() - q_tensor.pow_(norm) - err = torch.sum(q_tensor, 1) - - tmp = err < best - - if torch.any(tmp): - best[tmp] = err[tmp] - best_min_val[tmp] = xmin[tmp] - best_max_val[tmp] = xmax[tmp] - - ( - min_val[b_num * bs : (b_num + 1) * bs, :], - max_val[b_num * bs : (b_num + 1) * bs, :], - ) = (best_min_val, best_max_val) - - return (min_val, max_val) - - def get_learnable_range(self, tensor, lowbound_factor=None, upbound_factor=None): - min_val, max_val = self.get_minmax_range(tensor) - if self.sym: - if upbound_factor is not None: - abs_max = torch.max(max_val.abs(), min_val.abs()) - abs_max = abs_max.clamp(min=1e-5) - abs_max = self.sigmoid(upbound_factor) * abs_max - min_val = -abs_max - max_val = abs_max - else: - if upbound_factor is not None and lowbound_factor is not None: - min_val = self.sigmoid(lowbound_factor) * min_val - max_val = self.sigmoid(upbound_factor) * max_val - - return (min_val, max_val) - - def get_minmax_stats(self, act_tensors): - stats_min_max = {} - for input_idx, tensors in enumerate(act_tensors): - for tensor in tensors: - tensor = self.reshape_tensor(tensor) - tensor_range = self.get_minmax_range(tensor) - min_val, max_val = tensor_range[0], tensor_range[1] - - if input_idx not in stats_min_max: - stats_min_max[input_idx] = {} - stats_min_max[input_idx]['min'] = torch.tensor( - [min_val], dtype=torch.float32 - ) - 
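# The static min-max calibration built on these per-batch statistics (see
# get_static_minmax_range below) reduces to: record each calibration batch's
# (min, max) and use the mean of those statistics as the fixed activation
# range. A hypothetical per-tensor condensation:
import torch


def static_minmax_range(batches):            # batches: list of tensors
    mins = torch.stack([b.min() for b in batches])
    maxs = torch.stack([b.max() for b in batches])
    return mins.mean(), maxs.mean()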
stats_min_max[input_idx]['max'] = torch.tensor( - [max_val], dtype=torch.float32 - ) - else: - stats_min_max[input_idx]['min'] = torch.cat( - [ - stats_min_max[input_idx]['min'], - torch.tensor([min_val], dtype=torch.float32), - ] - ) - stats_min_max[input_idx]['max'] = torch.cat( - [ - stats_min_max[input_idx]['max'], - torch.tensor([max_val], dtype=torch.float32), - ] - ) - - return stats_min_max - - def get_static_minmax_range(self, act_tensors): - act_tensors = self.reshape_batch_tensors(act_tensors) - stats_min_max = self.get_minmax_stats(act_tensors) - min_vals, max_vals = [], [] - for input_idx, tensor_range in stats_min_max.items(): - min_val = tensor_range['min'].mean() - max_val = tensor_range['max'].mean() - min_vals.append(min_val) - max_vals.append(max_val) - - return min_vals, max_vals - - def get_norm( - self, delta_begin: torch.Tensor, delta_end: torch.Tensor, density: torch.Tensor - ) -> torch.Tensor: - r"""Compute the norm of the values uniformaly distributed between - delta_begin and delta_end. Currently only L2 norm is supported. - - norm = density * (integral_{begin, end} x^2) - = density * (end^3 - begin^3) / 3 - """ - norm = ( - delta_end * delta_end * delta_end - delta_begin * delta_begin * delta_begin - ) / 3 - return density * norm - - def get_quantization_error( - self, histogram, min_val, max_val, next_start_bin, next_end_bin - ): - r"""Compute the quantization error if we use start_bin to end_bin as - the min and max to do the quantization.""" - bin_width = (max_val.item() - min_val.item()) / self.bins - - dst_bin_width = bin_width * (next_end_bin - next_start_bin + 1) / self.dst_nbins - if dst_bin_width == 0.0: - return 0.0 - - src_bin = torch.arange(self.bins, device=histogram.device) - # distances from the beginning of first dst_bin to the beginning and - # end of src_bin - src_bin_begin = (src_bin - next_start_bin) * bin_width - src_bin_end = src_bin_begin + bin_width - - # which dst_bins the beginning and end of src_bin belong to? 
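# What this error term is used for (by get_hist_threshold further below):
# score a candidate narrowed range by the L2 error quantizing into it would
# cause, and keep narrowing while the error still shrinks. A hypothetical
# sample-based (non-histogram) version of the same score; lo and hi are
# Python floats.
import torch


def l2_quant_error(x, lo, hi, nbins=256):
    scale = max((hi - lo) / (nbins - 1), 1e-12)
    q = torch.round((x.clamp(lo, hi) - lo) / scale) * scale + lo
    return (q - x).pow(2).sum().item()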
- dst_bin_of_begin = torch.clamp( - torch.div(src_bin_begin, dst_bin_width, rounding_mode='floor'), - 0, - self.dst_nbins - 1, - ) - dst_bin_of_begin_center = (dst_bin_of_begin + 0.5) * dst_bin_width - - dst_bin_of_end = torch.clamp( - torch.div(src_bin_end, dst_bin_width, rounding_mode='floor'), - 0, - self.dst_nbins - 1, - ) - density = histogram / bin_width - - norm = torch.zeros(self.bins, device=histogram.device) - - delta_begin = src_bin_begin - dst_bin_of_begin_center - delta_end = dst_bin_width / 2 - norm += self.get_norm( - delta_begin, - torch.ones(self.bins, device=histogram.device) * delta_end, - density, - ) - - norm += (dst_bin_of_end - dst_bin_of_begin - 1) * self.get_norm( - torch.tensor(-dst_bin_width / 2), torch.tensor(dst_bin_width / 2), density - ) - - dst_bin_of_end_center = dst_bin_of_end * dst_bin_width + dst_bin_width / 2 - - delta_begin = -dst_bin_width / 2 - delta_end = src_bin_end - dst_bin_of_end_center - norm += self.get_norm(torch.tensor(delta_begin), delta_end, density) - - return norm.sum().item() - - def _upscale_histogram(self, histogram, orig_min, orig_max, update_min, update_max): - # this turns the histogram into a more fine-coarsed histogram to reduce - # bin quantization errors - histogram = histogram.repeat_interleave(self.upsample_rate) / self.upsample_rate - bin_size = (orig_max - orig_min) / (self.bins * self.upsample_rate) - mid_points_histogram = ( - torch.linspace( - orig_min, - orig_max, - self.bins * self.upsample_rate + 1, - device=orig_min.device, - )[:-1].to(histogram.device) - + 0.5 * bin_size - ) - boundaries_new_histogram = torch.linspace( - update_min, update_max, self.bins + 1, device=update_min.device - ).to(histogram.device) - # this maps the mid-poits of the histogram to the new histogram's space - bucket_assignments = ( - torch.bucketize(mid_points_histogram, boundaries_new_histogram, right=True) - - 1 - ) - # this then maps the histogram mid-points in the new space, - # weighted by the original histogram's values - # this is just the old histogram in the new histogram's space - - # In case due to numerical issues the values land higher/lower than the maximum/minimum - bucket_assignments[bucket_assignments >= self.bins] = self.bins - 1 - bucket_assignments[bucket_assignments < 0] = 0 - - update_histogram = torch.bincount( - bucket_assignments, weights=histogram, minlength=self.bins - ) - return update_histogram - - def _combine_histograms( - self, orig_hist, orig_min, orig_max, update_hist, update_min, update_max - ): - # If the new min and max are the same as the current min and max, - # we can just add the new histogram to the original histogram - if update_min == orig_min and update_max == orig_max: - return orig_hist + update_hist - - # If the orig hist only has one value (i.e., the min and max are the same) - # we can just add it into new histogram - if orig_min == orig_max: - bin_value = torch.sum(update_hist) - transformed_orig_hist = ( - torch.histc( - orig_min, bins=self.bins, min=update_min, max=update_max - ) # type: ignore[arg-type] - * bin_value - ) - return transformed_orig_hist + update_hist - - # We assume the update_hist is already in the target range, we will map the orig_max to it - assert update_min <= orig_min - assert update_max >= orig_max - - # Now we need to turn the old_histogram, into the range of the new histogram - transformed_orig_hist = self._upscale_histogram( - orig_hist, - orig_min, - orig_max, - update_min, - update_max, - ) - - return update_hist + transformed_orig_hist - - def 
get_hist_threshold(self, histogram, min_val, max_val): - - assert histogram.size()[0] == self.bins, 'bins mismatch' - bin_width = (max_val - min_val) / self.bins - - # cumulative sum - total = torch.sum(histogram).item() - cSum = torch.cumsum(histogram, dim=0) - - stepsize = 1e-8 - alpha = 0.0 # lower bound - beta = 1.0 # upper bound - start_bin = 0 - end_bin = self.bins - 1 - norm_min = float('inf') - - while alpha < beta: - # Find the next step - next_alpha = alpha + stepsize - next_beta = beta - stepsize - - # find the left and right bins between the quantile bounds - left = start_bin - right = end_bin - while left < end_bin and cSum[left] < next_alpha * total: - left = left + 1 - while right > start_bin and cSum[right] > next_beta * total: - right = right - 1 - - # decide the next move - next_start_bin = start_bin - next_end_bin = end_bin - if (left - start_bin) > (end_bin - right): - # move the start bin - next_start_bin = left - alpha = next_alpha - else: - # move the end bin - next_end_bin = right - beta = next_beta - - if next_start_bin == start_bin and next_end_bin == end_bin: - continue - - # calculate the quantization error using next_start_bin and next_end_bin - norm = self.get_quantization_error( - histogram, min_val, max_val, next_start_bin, next_end_bin - ) - - if norm > norm_min: - break - norm_min = norm - start_bin = next_start_bin - end_bin = next_end_bin - - new_min = min_val + bin_width * start_bin - new_max = min_val + bin_width * (end_bin + 1) - return new_min, new_max - - def get_static_hist_range(self, act_tensors): - act_tensors = self.reshape_batch_tensors(act_tensors) - stats_min_max = self.get_minmax_stats(act_tensors) - min_vals, max_vals = [], [] - histograms = [] - for input_idx, tensors in enumerate(act_tensors): - min_val, max_val = None, None - histogram = torch.zeros(self.bins) - tensor_range = stats_min_max[input_idx] - for idx, tensor in enumerate(tensors): - tensor = tensor.float() - x_min, x_max = tensor_range['min'][idx], tensor_range['max'][idx] - if min_val is None or max_val is None: - new_histogram = torch.histc( - tensor, self.bins, min=x_min.item(), max=x_max.item() - ) - histogram.detach_().resize_(new_histogram.shape) - histogram.copy_(new_histogram) - - min_val, max_val = x_min, x_max - else: - current_min, current_max = min_val, max_val - update_min, update_max = x_min, x_max - new_min = torch.min(current_min, update_min) - new_max = torch.max(current_max, update_max) - - update_histogram = torch.histc( - tensor, self.bins, min=new_min.item(), max=new_max.item() - ).to(histogram.device) - - if new_min == current_min and new_max == current_max: - combined_histogram = histogram + update_histogram - histogram.detach_().resize_(combined_histogram.shape) - histogram.copy_(combined_histogram) - else: - combined_histogram = self._combine_histograms( - histogram, - current_min, - current_max, - update_histogram, - new_min, - new_max, - ) - histogram.detach_().resize_(combined_histogram.shape) - histogram.copy_(combined_histogram) - - min_val, max_val = new_min, new_max - - min_vals.append(min_val) - max_vals.append(max_val) - histograms.append(histogram) - - new_min_vals, new_max_vals = [], [] - for i in range(len(histograms)): - histogram = histograms[i] - min_val, max_val = min_vals[i], max_vals[i] - new_min, new_max = self.get_hist_threshold(histogram, min_val, max_val) - new_min_vals.append(new_min) - new_max_vals.append(new_max) - - return new_min_vals, new_max_vals - - def get_static_moving_minmax_range(self, act_tensors, alpha): - 
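# get_static_moving_minmax_range (defined here) is an exponential moving
# average of per-batch ranges: each new batch nudges the running min/max by a
# factor alpha. Hypothetical scalar form of the update:
def ema_update(running, new, alpha=0.01):
    return new if running is None else running + alpha * (new - running)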
act_tensors = self.reshape_batch_tensors(act_tensors) - moving_min_vals, moving_max_vals = [], [] - for tensors in act_tensors: - moving_min_val, moving_max_val = None, None - for tensor in tensors: - tensor = self.reshape_tensor(tensor) - tensor_range = self.get_minmax_range(tensor) - min_val, max_val = tensor_range[0], tensor_range[1] - - if moving_min_val is None or moving_max_val is None: - moving_min_val = min_val - moving_max_val = max_val - else: - moving_min_val = moving_min_val + alpha * (min_val - moving_min_val) - moving_max_val = moving_max_val + alpha * (max_val - moving_max_val) - moving_min_vals.append(moving_min_val) - moving_max_vals.append(moving_max_val) - - return moving_min_vals, moving_max_vals - - def get_qparams(self, tensor_range, device): - min_val, max_val = tensor_range[0], tensor_range[1] - qmin = self.qmin.to(device) - qmax = self.qmax.to(device) - if self.sym: - abs_max = torch.max(max_val.abs(), min_val.abs()) - abs_max = abs_max.clamp(min=1e-5) - scales = abs_max / qmax - zeros = torch.tensor(0.0) - else: - scales = (max_val - min_val).clamp(min=1e-5) / (qmax - qmin) - zeros = (qmin - torch.round(min_val / scales)).clamp(qmin, qmax) - if not self.round_zp: - zeros = qmin - (min_val / scales) - return scales, zeros, qmax, qmin - - def get_batch_tensors_qparams(self, act_tensors, alpha=0.01, args={}): - scales_list, zeros_list, qmin_list, qmax_list = [], [], [], [] - - if self.calib_algo == 'static_hist': - assert ( - self.sym is True and self.granularity == 'per_tensor' - ), 'Only support per tensor static symmetric int quantize.' - min_vals, max_vals = self.get_static_hist_range(act_tensors) - elif self.calib_algo == 'static_minmax': - min_vals, max_vals = self.get_static_minmax_range(act_tensors) - elif self.calib_algo == 'static_moving_minmax': - min_vals, max_vals = self.get_static_moving_minmax_range(act_tensors, alpha) - else: - raise ValueError(f'Unsupported calibration algorithm: {self.calib_algo}') - - for i in range(len(min_vals)): - min_val, max_val = min_vals[i], max_vals[i] - scales, zeros, qmax, qmin = self.get_qparams( - (min_val, max_val), min_val.device - ) - scales_list.append(scales) - zeros_list.append(zeros) - qmin_list.append(qmin) - qmax_list.append(qmax) - - return scales_list, zeros_list, qmin_list, qmax_list - - def optimize_weights_proximal(self, tensor, scales, zeros, qmax, qmin): - best_error = 1e4 - current_beta = self.beta - current_kappa = self.kappa - scales = 1 / scales - for i in range(self.iters): - W_q = torch.round(tensor * scales + zeros).clamp(qmin, qmax) - W_r = (W_q - zeros) / scales - W_e = self.shrink_op(tensor - W_r, current_beta) - - zeros = torch.mean(W_q - (tensor - W_e) * scales, axis=-1, keepdim=True) - current_beta *= current_kappa - current_error = float(torch.abs(tensor - W_r).mean()) - - if current_error < best_error: - best_error = current_error - else: - break - - torch.cuda.empty_cache() - scales = 1 / scales - - return scales, zeros - - def reshape_tensor(self, tensor, allow_padding=False): - if self.granularity == 'per_group': - if tensor.shape[-1] >= self.group_size: - if tensor.shape[-1] % self.group_size == 0: - t = tensor.reshape(-1, self.group_size) - elif allow_padding: - deficiency = self.group_size - tensor.shape[1] % self.group_size - prefix = tensor.shape[:-1] - pad_zeros = torch.zeros( - (*prefix, deficiency), device=tensor.device, dtype=tensor.dtype - ) - t = torch.cat((tensor, pad_zeros), dim=-1).reshape( - -1, self.group_size - ) - else: - raise ValueError( - f'Dimension 
{tensor.shape[-1]} ' - f'not divisible by group size {self.group_size}' - ) - else: - t = tensor - elif self.granularity == 'per_head': - t = tensor.reshape(self.head_num, -1) - elif self.granularity == 'per_block': - m, n = tensor.shape - t_padded = torch.zeros((ceil_div(m, self.block_size) * self.block_size, ceil_div(n, self.block_size) * self.block_size), dtype=tensor.dtype, device=tensor.device) - t_padded[:m, :n] = tensor - t = t_padded.view(-1, self.block_size, t_padded.size(1) // self.block_size, self.block_size) - else: - t = tensor - return t - - def restore_tensor(self, tensor, shape): - if tensor.shape == shape: - t = tensor - elif self.granularity == 'per_block': - try: - t = tensor.reshape(-1, shape[-1])[:shape[0], :] - except RuntimeError: - t = tensor.reshape(shape[0], -1)[:, :shape[1]] - else: - try: - t = tensor.reshape(shape) - except RuntimeError: - deficiency = self.group_size - shape[1] % self.group_size - t = tensor.reshape(*shape[:-1], -1)[..., :-deficiency] - return t - - -class IntegerQuantizer(BaseQuantizer): - def __init__(self, bit, symmetric, granularity, **kwargs): - super().__init__(bit, symmetric, granularity, **kwargs) - self.quant_type = 'int-quant' - if 'int_range' in self.kwargs: - self.qmin = self.kwargs['int_range'][0] - self.qmax = self.kwargs['int_range'][1] - else: - if self.sym: - self.qmin = -(2 ** (self.bit - 1)) - self.qmax = 2 ** (self.bit - 1) - 1 - else: - self.qmin = 0.0 - self.qmax = 2**self.bit - 1 - - self.qmin = torch.tensor(self.qmin) - self.qmax = torch.tensor(self.qmax) - self.dst_nbins = 2**bit - - def get_hqq_qparams(self, tensor, args): - tensor = tensor.float() - tensor = self.reshape_tensor(tensor) - tensor_range = self.get_minmax_range(tensor) - scales, zeros, qmax, qmin = self.get_qparams(tensor_range, tensor.device) - best_scales, best_zeros = self.optimize_weights_proximal( - tensor, scales, zeros, qmax, qmin - ) - return tensor, best_scales, best_zeros, qmax, qmin - - def get_tensor_qparams(self, tensor, args={}): - if self.calib_algo == 'hqq': - return self.get_hqq_qparams(tensor, args) - else: - tensor = self.reshape_tensor(tensor) - tensor_range = self.get_tensor_range(tensor, args) - scales, zeros, qmax, qmin = self.get_qparams(tensor_range, tensor.device) - return tensor, scales, zeros, qmax, qmin - - def quant(self, tensor, scales, zeros, qmax, qmin): - if self.round_zp: - tensor = torch.clamp(self.round_func(tensor / scales) + zeros, qmin, qmax) - else: - tensor = torch.clamp( - self.round_func(tensor / scales.clamp_min(1e-9) + zeros), - qmin, - qmax, - ) - return tensor - - def dequant(self, tensor, scales, zeros): - tensor = (tensor - zeros) * scales - return tensor - - def quant_dequant(self, tensor, scales, zeros, qmax, qmin, output_scale_factor=1): - tensor = self.quant(tensor, scales, zeros, qmax, qmin) - tensor = self.dequant(tensor, scales * output_scale_factor, zeros) - return tensor - - def fake_quant_act_static(self, act, args={}): - if 'int_indices' in args: - q_act = act[:, :, args['int_indices']] - fp_act = act[:, :, args['fp_indices']] - else: - q_act = act - - if 'current_bit' in args: - org_bit = self.bit - self.bit = args['current_bit'] - - org_act_shape = q_act.shape - org_act_dtype = q_act.dtype - - scales, zeros, qmax, qmin = ( - args['scales'], - args['zeros'], - args['qmax'], - args['qmin'], - ) - q_act = self.reshape_tensor(q_act) - q_act = self.quant_dequant(q_act, scales, zeros, qmax, qmin) - q_act = self.restore_tensor(q_act, org_act_shape).to(org_act_dtype) - - if 'current_bit' in args: - 
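# reshape_tensor / restore_tensor (used just above) are the per-group round
# trip: flatten the weight to (-1, group_size), compute one scale per row,
# then restore the original shape. Hypothetical int4 symmetric example:
import torch

w = torch.randn(32, 256)
group_size = 64
g = w.reshape(-1, group_size)                                    # (128, 64)
scale = g.abs().amax(dim=-1, keepdim=True).clamp(min=1e-5) / 7   # qmax = 7
w_dq = (torch.round(g / scale).clamp(-8, 7) * scale).reshape(w.shape)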
self.bit = org_bit - - if 'int_indices' in args: - mix_act = torch.zeros_like(act) - mix_act[:, :, args['int_indices']] = q_act - mix_act[:, :, args['fp_indices']] = fp_act - return mix_act - - return q_act - - def fake_quant_act_dynamic(self, act, args={}): - if 'int_indices' in args: - q_act = act[:, :, args['int_indices']] - fp_act = act[:, :, args['fp_indices']] - else: - q_act = act - - if 'current_bit' in args: - org_bit = self.bit - self.bit = args['current_bit'] - - org_act_shape = q_act.shape - org_act_dtype = q_act.dtype - - q_act, scales, zeros, qmax, qmin = self.get_tensor_qparams(q_act, args) - q_act = self.quant_dequant(q_act, scales, zeros, qmax, qmin) - - q_act = self.restore_tensor(q_act, org_act_shape).to(org_act_dtype) - - if 'current_bit' in args: - self.bit = org_bit - - if 'int_indices' in args: - mix_act = torch.zeros_like(act) - mix_act[:, :, args['int_indices']] = q_act - mix_act[:, :, args['fp_indices']] = fp_act - return mix_act - if self.ste_all: - return (q_act - act).detach() + act - return q_act - - def fake_quant_weight_static(self, weight, args): - if 'int_indices' in args: - if self.granularity == 'per_group': - assert len(args['int_indices']) % self.group_size == 0 - q_weight = weight[:, args['int_indices']] - fp_weight = weight[:, args['fp_indices']] - - elif 'dim' in args and 'ic' in args['dim']: - q_weight = weight.T - else: - q_weight = weight - - if 'rounding' in args: - org_round_func = self.round_func - self.round_func = lambda x: torch.floor(x) + args['rounding'] - - org_w_shape = q_weight.shape - org_w_dtype = q_weight.dtype - scales, zeros, qmax, qmin = ( - args['scales'], - args['zeros'], - args['qmax'], - args['qmin'], - ) - output_scale_factor = ( - args['output_scale_factor'] if 'output_scale_factor' in args else 1 - ) - - q_weight = self.reshape_tensor(q_weight) - q_weight = self.quant_dequant( - q_weight, scales, zeros, qmax, qmin, output_scale_factor - ) - q_weight = self.restore_tensor(q_weight, org_w_shape).to(org_w_dtype) - - if 'int_indices' in args: - mix_weight = torch.zeros_like(weight) - mix_weight[:, args['int_indices']] = q_weight - mix_weight[:, args['fp_indices']] = fp_weight - return mix_weight - - elif 'dim' in args and 'ic' in args['dim']: - q_weight = q_weight.T - - if 'rounding' in args: - self.round_func = org_round_func - - return q_weight - - def fake_quant_weight_dynamic(self, weight, args={}): - if 'int_indices' in args: - if self.granularity == 'per_group': - assert len(args['int_indices']) % self.group_size == 0 - q_weight = weight[:, args['int_indices']] - fp_weight = weight[:, args['fp_indices']] - - elif 'dim' in args and 'ic' in args['dim']: - q_weight = weight.T - else: - q_weight = weight - - if 'current_bit' in args: - org_bit = self.bit - self.bit = args['current_bit'] - - org_w_shape = q_weight.shape - org_w_dtype = q_weight.dtype - - q_weight, scales, zeros, qmax, qmin = self.get_tensor_qparams(q_weight, args) - q_weight = self.quant_dequant(q_weight, scales, zeros, qmax, qmin) - - q_weight = self.restore_tensor(q_weight, org_w_shape).to(org_w_dtype) - - if 'current_bit' in args: - self.bit = org_bit - - if 'int_indices' in args: - mix_weight = torch.zeros_like(weight) - mix_weight[:, args['int_indices']] = q_weight - mix_weight[:, args['fp_indices']] = fp_weight - return mix_weight - - elif 'dim' in args and 'ic' in args['dim']: - q_weight = q_weight.T - - return q_weight - - def real_quant_weight_static(self, weight, args): - org_w_shape = weight.shape - if 'output_scale_factor' in args: - 
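# real_quant_weight_static / _dynamic (this function and the one below) differ
# from the fake-quant paths in that the weight is actually stored in the
# integer dtype, alongside per-row scales (and zero points in the asymmetric
# case). Hypothetical 8-bit symmetric example:
import torch

w = torch.randn(128, 512)
scale = w.abs().amax(dim=-1, keepdim=True).clamp(min=1e-5) / 127
w_int8 = torch.round(w / scale).clamp(-128, 127).to(torch.int8)   # stored weight
w_deq = w_int8.float() * scale                                    # dequantized copy for checking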
output_scale_factor = args['output_scale_factor'] - del args['output_scale_factor'] - else: - output_scale_factor = 1 - scales, zeros, qmax, qmin = ( - args['scales'], - args['zeros'], - args['qmax'], - args['qmin'], - ) - weight = self.reshape_tensor(weight) - weight = self.quant(weight, scales, zeros, qmax, qmin) - weight = self.restore_tensor(weight, org_w_shape) - - scales = scales * output_scale_factor - - if self.bit == 8: - if self.qmin != 0: - dtype = torch.int8 - else: - dtype = torch.uint8 - else: - dtype = torch.int32 - weight = weight.to(dtype) - if not self.sym and self.round_zp: - zeros = zeros.to(dtype) - elif self.sym: - zeros = None - - if self.granularity == 'per_tensor': - qparams_shape = 1 - elif self.granularity == 'per_block': - qparams_shape = (scales.shape[0], scales.shape[2]) - else: - qparams_shape = (weight.shape[0], -1) - - if zeros is not None: - zeros = zeros.view(qparams_shape) - scales = scales.view(qparams_shape) - - return weight, scales, zeros - - def real_quant_weight_dynamic(self, weight, args={}): - org_w_shape = weight.shape - if 'output_scale_factor' in args: - output_scale_factor = args['output_scale_factor'] - del args['output_scale_factor'] - else: - output_scale_factor = 1 - weight, scales, zeros, qmax, qmin = self.get_tensor_qparams(weight, args) - weight = self.quant(weight, scales, zeros, qmax, qmin) - weight = self.restore_tensor(weight, org_w_shape) - - scales = scales * output_scale_factor - - if self.bit == 8: - if self.qmin != 0: - dtype = torch.int8 - else: - dtype = torch.uint8 - else: - dtype = torch.int32 - weight = weight.to(dtype) - if not self.sym and self.round_zp: - zeros = zeros.to(dtype) - elif self.sym: - zeros = None - - if self.granularity == 'per_tensor': - qparams_shape = 1 - elif self.granularity == 'per_block': - qparams_shape = (scales.shape[0], scales.shape[2]) - else: - qparams_shape = (weight.shape[0], -1) - - if zeros is not None: - zeros = zeros.view(qparams_shape) - scales = scales.view(qparams_shape) - - return weight, scales, zeros - - def __repr__(self): - return ( - f'IntegerQuantizer(bit={self.bit}, sym={self.sym},' - f'granularity={self.granularity},' - f'kwargs={self.kwargs}, qmin={self.qmin}, qmax={self.qmax})' - ) - - -class FloatQuantizer(BaseQuantizer): - def __init__(self, bit, symmetric, granularity, **kwargs): - super().__init__(bit, symmetric, granularity, **kwargs) - self.sym = True - self.quant_type = 'float-quant' - self.e_bits = int(self.bit[1]) - self.m_bits = int(self.bit[-1]) - self.sign_bits = 1 - self.num_bits = self.e_bits + self.m_bits + self.sign_bits - self.default_bias = 2 ** (self.e_bits - 1) - self.dst_nbins = 2**self.num_bits - self.use_qtorch = self.kwargs.get('use_qtorch') - if self.use_qtorch: - assert ( - float_quantize is not None - ), 'Please install qtorch (pip install qtorch). Or set use_qtorch=False' - if 'float_range' in self.kwargs: - self.qmin, self.qmax = self.kwargs['float_range'] - else: - bit_ranges = { - ('e4m3', 8): torch.float8_e4m3fn, - ('e5m2', 8): torch.float8_e5m2, - ('e3m2', 6): (-28, 28), - ('e4m7', 12): (-510, 510), - ('e2m1', 4): (-6, 6), - } - - key = (self.bit, self.num_bits) - if key in bit_ranges: - if isinstance(bit_ranges[key], tuple): - self.qmin, self.qmax = bit_ranges[key] - else: - finfo = torch.finfo(bit_ranges[key]) - self.qmin, self.qmax = finfo.min, finfo.max - else: - raise NotImplementedError( - 'Only 4, 6, 8, and \ - 12-bit quantization is supported.' 
- ) - self.qmax = torch.tensor(self.qmax) - self.qmin = torch.tensor(self.qmin) - - def get_float_qparams(self, tensor, tensor_range, device): - min_val, max_val = tensor_range[0], tensor_range[1] - maxval = torch.max(max_val, -min_val) - - e_bits = torch.tensor(self.e_bits, dtype=torch.float32).cuda() - m_bits = torch.tensor(self.m_bits, dtype=torch.float32).cuda() - - if maxval.shape[0] != 1 and len(maxval.shape) != len(tensor.shape): - maxval = maxval.view([-1] + [1] * (len(tensor.shape) - 1)) - - if e_bits >= 5: - maxval = maxval.to(dtype=torch.float32) - - bias = 2**e_bits - torch.log2(maxval) + torch.log2(2 - 2 ** (-m_bits)) - 1 - - xc = torch.min(torch.max(tensor, -maxval), maxval) - - log_scales = torch.clamp( - (torch.floor(torch.log2(torch.abs(xc)) + bias)).detach(), 1.0 - ) - scales = 2.0 ** (log_scales - m_bits - bias) - - return xc, scales - - def get_hqq_qparams(self, tensor, args): - tensor = tensor.float() - tensor = self.reshape_tensor(tensor) - tensor_range = self.get_minmax_range(tensor) - if self.use_qtorch: - scales, zeros, qmax, qmin = self.get_qparams(tensor_range, tensor.device) - else: - tensor, scales = self.get_float_qparams(tensor, tensor_range, tensor.device) - zeros, qmin, qmax = torch.tensor(0), None, None - best_scales, best_zeros = self.optimize_weights_proximal( - tensor, scales, zeros, qmax, qmin - ) - return tensor, best_scales, best_zeros, qmax, qmin - - def get_tensor_qparams(self, tensor, args={}): - if self.calib_algo == 'hqq': - return self.get_hqq_qparams(tensor, args) - else: - tensor = self.reshape_tensor(tensor) - tensor_range = self.get_tensor_range(tensor, args) - if self.use_qtorch: - scales, zeros, qmax, qmin = self.get_qparams( - tensor_range, tensor.device - ) - else: - tensor, scales = self.get_float_qparams( - tensor, tensor_range, tensor.device - ) - zeros, qmin, qmax = torch.tensor(0), None, None - - return tensor, scales, zeros, qmax, qmin - - def quant(self, tensor, scales, zeros, qmax, qmin): - scales[scales == 0] = 1 - scaled_tensor = tensor / scales + zeros - if self.use_qtorch: - org_dtype = scaled_tensor.dtype - q_tensor = float_quantize( - scaled_tensor.float(), self.e_bits, self.m_bits, rounding='nearest' - ) - q_tensor = q_tensor.to(org_dtype) - else: - q_tensor = self.round_func(scaled_tensor) - return q_tensor - - def dequant(self, tensor, scales, zeros): - tensor = (tensor - zeros) * scales - return tensor - - def quant_dequant(self, tensor, scales, zeros, qmax, qmin): - tensor = self.quant(tensor, scales, zeros, qmax, qmin) - tensor = self.dequant(tensor, scales, zeros) - return tensor - - def fake_quant_act_static(self, act, args={}): - q_act = act - org_act_shape = q_act.shape - org_act_dtype = q_act.dtype - - scales, zeros, qmax, qmin = ( - args['scales'], - args['zeros'], - args['qmax'], - args['qmin'], - ) - q_act = self.reshape_tensor(q_act) - q_act = self.quant_dequant(q_act, scales, zeros, qmax, qmin) - q_act = self.restore_tensor(q_act, org_act_shape).to(org_act_dtype) - - return q_act - - def fake_quant_act_dynamic(self, act, args={}): - q_act = act - org_act_shape = q_act.shape - org_act_dtype = q_act.dtype - - q_act, scales, zeros, qmax, qmin = self.get_tensor_qparams(q_act, args) - q_act = self.quant_dequant(q_act, scales, zeros, qmax, qmin) - - q_act = self.restore_tensor(q_act, org_act_shape).to(org_act_dtype) - return q_act - - def fake_quant_weight_static(self, weight, args): - - if 'dim' in args and 'ic' in args['dim']: - q_weight = weight.T - else: - q_weight = weight - - if 'rounding' in args: - org_round_func = 
self.round_func - self.round_func = lambda x: torch.floor(x) + args['rounding'] - - org_w_shape = q_weight.shape - org_w_dtype = q_weight.dtype - scales, zeros, qmax, qmin = ( - args['scales'], - args['zeros'], - args['qmax'], - args['qmin'], - ) - q_weight = self.reshape_tensor(q_weight) - q_weight = self.quant_dequant(q_weight, scales, zeros, qmax, qmin) - q_weight = self.restore_tensor(q_weight, org_w_shape).to(org_w_dtype) - - if 'dim' in args and 'ic' in args['dim']: - q_weight = q_weight.T - - if 'rounding' in args: - self.round_func = org_round_func - - return q_weight - - def fake_quant_weight_dynamic(self, weight, args={}): - - if 'dim' in args and 'ic' in args['dim']: - q_weight = weight.T - else: - q_weight = weight - - org_w_shape = q_weight.shape - org_w_dtype = q_weight.dtype - - q_weight, scales, zeros, qmax, qmin = self.get_tensor_qparams(q_weight, args) - q_weight = self.quant_dequant(q_weight, scales, zeros, qmax, qmin) - q_weight = self.restore_tensor(q_weight, org_w_shape).to(org_w_dtype) - - if 'dim' in args and 'ic' in args['dim']: - q_weight = q_weight.T - - return q_weight - - def real_quant_weight_static(self, weight, args): - assert self.bit in ['e4m3', 'e5m2'], 'Only FP8 E4M3 and E5M2 support real quant' - dtype = torch.float8_e4m3fn if self.e_bits == 4 else torch.float8_e5m2 - - org_w_shape = weight.shape - if 'output_scale_factor' in args: - output_scale_factor = args['output_scale_factor'] - del args['output_scale_factor'] - else: - output_scale_factor = 1 - scales, zeros, qmax, qmin = ( - args['scales'], - args['zeros'], - args['qmax'], - args['qmin'], - ) - weight = self.reshape_tensor(weight) - weight = self.quant(weight, scales, zeros, qmax, qmin) - weight = self.restore_tensor(weight, org_w_shape) - - scales = scales * output_scale_factor - - weight = weight.to(dtype) - zeros = None - if self.granularity == 'per_tensor': - qparams_shape = 1 - elif self.granularity == 'per_block': - qparams_shape = (scales.shape[0], scales.shape[2]) - else: - qparams_shape = (weight.shape[0], -1) - - scales = scales.view(qparams_shape) - return weight, scales, zeros - - def real_quant_weight_dynamic(self, weight, args={}): - assert self.bit in ['e4m3', 'e5m2'], 'Only FP8 E4M3 and E5M2 support real quant' - dtype = torch.float8_e4m3fn if self.e_bits == 4 else torch.float8_e5m2 - - org_w_shape = weight.shape - if 'output_scale_factor' in args: - output_scale_factor = args['output_scale_factor'] - del args['output_scale_factor'] - else: - output_scale_factor = 1 - weight, scales, zeros, qmax, qmin = self.get_tensor_qparams(weight, args) - weight = self.quant(weight, scales, zeros, qmax, qmin) - weight = self.restore_tensor(weight, org_w_shape) - - scales = scales * output_scale_factor - - weight = weight.to(dtype) - zeros = None - if self.granularity == 'per_tensor': - qparams_shape = 1 - elif self.granularity == 'per_block': - qparams_shape = (scales.shape[0], scales.shape[2]) - else: - qparams_shape = (weight.shape[0], -1) - - scales = scales.view(qparams_shape) - return weight, scales, zeros - - def __repr__(self): - return ( - f'FloatQuantizer(bit={self.bit},' - f'e_bits={self.e_bits}, m_bits={self.m_bits},' - f'granularity={self.granularity},' - f'kwargs={self.kwargs}, qmin={self.qmin}, qmax={self.qmax})' - ) - - -class Weight48IntegerQuantizer(BaseQuantizer): - # flake8: noqa - def __init__(self, bit, bit4, bit8, **kwargs): - super().__init__(bit, None, None, **kwargs) - self.quant_type = 'int-quant-w48' - assert self.bit == 48, 'Only support 48-bit quantization' - 
self.bit_settings = {} - self.bit_settings[4] = bit4 - self.bit_settings[8] = bit8 - for bit in [4, 8]: - if 'int_range' in self.bit_settings[bit]: - self.bit_settings[bit]['qmin'] = self.bit_settings[bit]['int_range'][0] - self.bit_settings[bit]['qmax'] = self.bit_settings[bit]['int_range'][1] - else: - if self.bit_settings[bit]['symmetric']: - self.bit_settings[bit]['qmin'] = -(2 ** (bit - 1)) - self.bit_settings[bit]['qmax'] = 2 ** (bit - 1) - 1 - else: - self.bit_settings[bit]['qmin'] = 0 - self.bit_settings[bit]['qmax'] = 2**bit - 1 - self.bit_settings[bit]['qmin'] = torch.tensor( - self.bit_settings[bit]['qmin'] - ) - self.bit_settings[bit]['qmax'] = torch.tensor( - self.bit_settings[bit]['qmax'] - ) - if 'scales_bit' in self.bit_settings[bit]: - if self.bit_settings[bit]['scales_symmetric']: - self.bit_settings[bit]['scales_qmin'] = -( - 2 ** (self.bit_settings[bit]['scales_bit'] - 1) - ) - self.bit_settings[bit]['scales_qmax'] = ( - 2 ** (self.bit_settings[bit]['scales_bit'] - 1) - 1 - ) - else: - self.bit_settings[bit]['scales_qmin'] = 0 - self.bit_settings[bit]['scales_qmax'] = ( - 2 ** self.bit_settings[bit]['scales_bit'] - 1 - ) - else: - self.bit_settings[bit]['scales_qmin'] = -torch.inf - self.bit_settings[bit]['scales_qmax'] = torch.inf - if 'zeros_bit' in self.bit_settings[bit]: - if self.bit_settings[bit]['zeros_symmetric']: - self.bit_settings[bit]['zeros_qmin'] = -( - 2 ** (self.bit_settings[bit]['scales_bit'] - 1) - ) - self.bit_settings[bit]['zeros_qmax'] = ( - 2 ** (self.bit_settings[bit]['scales_bit'] - 1) - 1 - ) - else: - self.bit_settings[bit]['zeros_qmin'] = 0 - self.bit_settings[bit]['zeros_qmax'] = ( - 2 ** self.bit_settings[bit]['scales_bit'] - 1 - ) - else: - self.bit_settings[bit]['zeros_qmin'] = self.bit_settings[bit]['qmin'] - self.bit_settings[bit]['zeros_qmax'] = self.bit_settings[bit]['qmax'] - - def reshape_tensor(self, tensor, bit=4): - granularity = self.bit_settings[bit].get('granularity') - if granularity == 'per_group': - group_size = self.bit_settings[bit].get('group_size') - if tensor.shape[-1] % group_size == 0: - t = tensor.reshape(-1, group_size) - else: - raise ValueError( - f'Dimension {tensor.shape[-1]} ' - f'not divisible by group size {group_size}' - ) - else: - t = tensor - return t - - def get_qparams(self, tensor_range, device, bit): - min_val, max_val = tensor_range[0], tensor_range[1] - qmin = self.bit_settings[bit]['qmin'].to(device) - qmax = self.bit_settings[bit]['qmax'].to(device) - sym = self.bit_settings[bit]['symmetric'] - if sym: - abs_max = torch.max(max_val.abs(), min_val.abs()) - abs_max = abs_max.clamp(min=1e-5) - scales = abs_max / qmax - zeros = torch.tensor(0.0) - else: - scales = (max_val - min_val).clamp(min=1e-5) / (qmax - qmin) - zeros = qmin - torch.round(min_val / scales) - scales = scales.clamp( - self.bit_settings[bit]['scales_qmin'], self.bit_settings[bit]['scales_qmax'] - ) - zeros = zeros.clamp( - self.bit_settings[bit]['zeros_qmin'], self.bit_settings[bit]['zeros_qmax'] - ) - return scales, zeros, qmax, qmin - - def quant(self, tensor, scales, zeros, qmax, qmin): - tensor = torch.clamp(self.round_func(tensor / scales) + zeros, qmin, qmax) - return tensor - - def dequant(self, tensor, scales, zeros): - tensor = (tensor - zeros) * scales - return tensor - - def quant_dequant(self, tensor, scales, zeros, qmax, qmin): - tensor = self.quant(tensor, scales, zeros, qmax, qmin) - tensor = self.dequant(tensor, scales, zeros) - return tensor - - def fake_quant_weight_dynamic(self, weight, args={}): - # step 1: 
quantize to 8-bit - org_shape16 = weight.shape - org_dtype16 = weight.dtype - weight = self.reshape_tensor(weight, bit=8) - weight_range = self.get_tensor_range(weight) - scales816, zeros816, qmax816, qmin816 = self.get_qparams( - weight_range, weight.device, bit=8 - ) - weight = self.quant(weight, scales816, zeros816, qmax816, qmin816) - - # step 2: quantize to 4-bit - org_shape8 = weight.shape - org_dtype8 = weight.dtype - weight = self.reshape_tensor(weight, bit=4) - weight_range = self.get_tensor_range(weight) - scales48, zeros48, qmax48, qmin48 = self.get_qparams( - weight_range, weight.device, bit=4 - ) - weight = self.quant(weight, scales48, zeros48, qmax48, qmin48) - - # step 3: dequantize to 8-bit - weight = self.dequant(weight, scales48, zeros48) - weight = self.restore_tensor(weight, org_shape8).to(org_dtype8) - - # step 4: dequantize to 16-bit - weight = self.dequant(weight, scales816, zeros816) - weight = self.restore_tensor(weight, org_shape16).to(org_dtype16) - - return weight diff --git a/llmc/compression/quantization/quarot.py b/llmc/compression/quantization/quarot.py deleted file mode 100755 index 91bd855ae..000000000 --- a/llmc/compression/quantization/quarot.py +++ /dev/null @@ -1,155 +0,0 @@ -import gc -import json -import os - -import torch -import torch.nn as nn -from loguru import logger - -from llmc.utils.registry_factory import ALGO_REGISTRY - -from .base_blockwise_quantization import BaseBlockwiseQuantization -from .hadamard_utils import apply_exact_had_to_linear, random_hadamard_matrix -from .module_utils import (_LLMC_LN_TYPES_, _TRANSFORMERS_LN_TYPES_, - LlmcRMSNorm, RotateLinear) - - -@ALGO_REGISTRY -class Quarot(BaseBlockwiseQuantization): - def __init__(self, model, quant_config, input, padding_mask, config): - super().__init__(model, quant_config, input, padding_mask, config) - self.dev = torch.device('cuda') - self.add_quant_config() - self.preprocess() - - def preprocess(self): - if torch.equal( - self.model.get_head_layers()[0].weight, - self.model.get_embed_layers()[0].weight, - ): - logger.info('Tie weight! 
Copy embed_layer for head_layer!') - del self.model.get_head_layers()[0].weight - w = self.model.get_embed_layers()[0].weight.clone() - self.model.get_head_layers()[0].weight = nn.Parameter(w) - - self.remove_mean_from_embed() - - self.Q = self.get_orthogonal_matrix() - self.rotate_embeddings(self.Q) - - pre_head_ln = self.model.get_pre_head_layernorm_layers()[0] - self.fuse_ln_fcs(pre_head_ln, self.model.get_head_layers()) - - self.model.replace_module_subset( - LlmcRMSNorm, - self.model.model, - {'layers': {'model.norm': pre_head_ln}}, - None, - {}, - ) - - self.rotate_head(self.Q) - - for rot_layer in self.model.get_extra_rot_module_besides_embed_layers(): - logger.info('For multimodal models, QuaRot needs to rotate the last layer in the projector.') - logger.info(f'rot_layer : {rot_layer}') - # docformatter: off - """ - txt_input img_input - | | - Embedding vision_projector - | | - | - input_embeds - | - Y - Therefore: - X_txt ~ W_embedding * Q = X_txt ~ (W_embedding * Q) - X_proj * W_proj.t() * Q = X_proj * (Q.t() * W_proj).t() - """ - # docformatter: on - dtype = rot_layer.weight.dtype - device = self.Q.device - W = rot_layer.weight.data.to(device=device, dtype=torch.float64) - rot_layer.weight.data = torch.matmul(self.Q.T, W).to(device='cpu', dtype=dtype) # noqa - - gc.collect() - torch.cuda.empty_cache() - - @torch.no_grad() - def add_quant_config(self): - self.rotate_mode = self.quant_config['special']['rotate_mode'] - - def random_orthogonal_matrix(self, size, device): - torch.cuda.empty_cache() - random_matrix = torch.randn(size, size, dtype=torch.float64).to(device) - q, r = torch.linalg.qr(random_matrix) - q *= torch.sign(torch.diag(r)).unsqueeze(0) - return q - - def get_orthogonal_matrix(self): - if self.rotate_mode == 'random': - return self.random_orthogonal_matrix(self.hidden_size, self.dev) - elif self.rotate_mode == 'hadamard': - return random_hadamard_matrix(self.hidden_size, self.dev) - else: - raise ValueError(f'Unsupported mode {self.rotate_mode}') - - def block_transform(self, block): - logger.info(f'Start transform the {self.block_idx+1}-th block') - - if self.online_rotate: - self.replace_rotate_linears(block) - subsets = self.model.get_subsets_in_block(block) - for index, subset in enumerate(subsets): - self.subset_transform(block, subset) - - self.model.replace_module_block(LlmcRMSNorm, block, self.block_idx, {}) - gc.collect() - - logger.info(f'block:{block}') - logger.info(f'End transform the {self.block_idx+1}-th block') - - @torch.no_grad() - def subset_transform(self, block, subset): - prev_op = subset['prev_op'] - layers_dict = subset['layers'] - assert ( - len(prev_op) == 1 - ), 'Only a single prev_op is supported. If there are multiple prev_ops, the code needs to be updated.'
- - layers = list(layers_dict.values()) - - if 'skip_rotate' in subset and subset['skip_rotate']: - return - - if isinstance(prev_op[0], tuple(_LLMC_LN_TYPES_ + _TRANSFORMERS_LN_TYPES_)): - self.fuse_ln_fcs(prev_op[0], layers) - self.rotate_pre_layers(layers, self.Q) - else: - if self.config['model']['type'] in ['Opt', 'StableLm']: - self.bake_mean_into_fc(layers[0]) - - if 'is_mlp' in subset and subset['is_mlp']: - self.rotate_post_layers( - layers, self.Q, exact_had=True if self.online_rotate else False - ) - else: - self.rotate_post_layers(layers, self.Q, exact_had=False) - if self.online_rotate: - if prev_op[0] is not None: - apply_exact_had_to_linear( - prev_op[0], had_dim=self.head_dim, output=True - ) - apply_exact_had_to_linear(layers[0], had_dim=-1, output=False) - - @torch.no_grad() - def save_model(self, path): - super().save_model(path) - path = os.path.join(path, 'config.json') - with open(path, 'r') as f: - config = json.load(f) - if 'tie_word_embeddings' in config: - config['tie_word_embeddings'] = False - with open(path, 'w') as f: - json.dump(config, f, indent=4) diff --git a/llmc/compression/quantization/quik.py b/llmc/compression/quantization/quik.py deleted file mode 100644 index 3a1e0441b..000000000 --- a/llmc/compression/quantization/quik.py +++ /dev/null @@ -1,151 +0,0 @@ -import functools -import gc - -import torch -import torch.nn as nn -from tqdm import tqdm - -from llmc.utils.registry_factory import ALGO_REGISTRY - -from .base_blockwise_quantization import BaseBlockwiseQuantization - - -@ALGO_REGISTRY -class QUIK(BaseBlockwiseQuantization): - def __init__(self, model, quant_config, input, padding_mask, config): - super().__init__(model, quant_config, input, padding_mask, config) - self.add_quant_config() - - def add_quant_config(self): - self.prefix = self.model.block_name_prefix - self.fp_relative = self.quant_config['special']['fp_relative'] - self.fp_features = self.quant_config['special']['fp_features'] - self.fp_threshold = self.quant_config['special']['fp_threshold'] - if 'last_fc_bit' in self.quant_config: - self.last_fc_bit = self.quant_config['special']['last_fc_bit'] - self.act_scales = self.get_act_scale_shift(stat='scales') - self.int_ids = {} - self.fp_ids = {} - - def get_act_scale_shift(self, stat='scales'): - self.model.model.eval() - - act_stat = {} - - def get_tensor_scale(name, tensor): - hidden_dim = tensor.shape[-1] - tensor = tensor.view(-1, hidden_dim).abs().detach() - comming_max = torch.max(tensor, dim=0)[0].float().cpu() - if name in act_stat: - act_stat[name] = torch.max(act_stat[name], comming_max) - else: - act_stat[name] = comming_max - - def get_tensor_shift(name, tensor): - hidden_dim = tensor.shape[-1] - tensor = tensor.view(-1, hidden_dim).detach() - comming_max = torch.max(tensor, dim=0)[0].float().cpu() - comming_min = torch.min(tensor, dim=0)[0].float().cpu() - if name in act_stat: - act_stat[name] = 0.99 * act_stat[name] + 0.01 * ( - (comming_max + comming_min) / 2 - ) - else: - act_stat[name] = (comming_max + comming_min) / 2 - - def stat_input_hook(m, x, y, name): - if isinstance(x, tuple): - x = x[0] - if stat == 'scales': - get_tensor_scale(name, x) - elif stat == 'shifts': - get_tensor_shift(name, x) - - hooks = [] - for name, m in self.model.model.named_modules(): - if isinstance(m, nn.Linear): - hooks.append( - m.register_forward_hook( - functools.partial(stat_input_hook, name=name) - ) - ) - - with torch.no_grad(): - for i in tqdm(range(len(self.blocks))): - block = self.blocks[i] - block.cuda() - if i == 0: - fp_inps 
= self.block_forward(block) - else: - fp_inps = self.block_forward(block, fp_inps) - - block.cpu() - - for h in hooks: - h.remove() - gc.collect() - torch.cuda.empty_cache() - - return act_stat - - def block_opt(self, block): - layers_dict = self.model.get_block_linears(block) - for n, m in layers_dict.items(): - layer_name = f'{self.prefix}.{self.block_idx}.{n}' - - if self.fp_relative: - outlier_num = ( - int(block.in_features / self.model.model.config.hidden_size) - * self.fp_features - ) - else: - outlier_num = self.fp_features - - layer_scales = None - if outlier_num > 0: - layer_scales = self.act_scales[layer_name] - max_val = layer_scales.abs().max() - - fp_threshold = self.fp_threshold - if hasattr(self, 'last_fc_bit'): - if 'dense_4h_to_h' in n or 'down_proj' in n: - fp_threshold = self.fp_threshold * 2 - m.register_buffer( - 'buf_current_bit', torch.tensor(self.last_fc_bit) - ) - - if max_val <= fp_threshold: - outlier_num = 0 - layer_scales = None - - int_indices = torch.sort(layer_scales)[1][:-outlier_num] - fp_indices = torch.sort(layer_scales)[1][-outlier_num:] - - m.register_buffer('buf_int_ids', int_indices) - m.register_buffer('buf_fp_ids', fp_indices) - del self.act_scales[layer_name] - - def w_qdq(self, module, wquantizer): - weight = module.weight - args = {} - args['int_indices'] = module.buf_int_ids - args['fp_indices'] = module.buf_fp_ids - - if hasattr(module, 'buf_current_bit'): - args['current_bit'] = module.buf_current_bit - - weight = self.wquantizer.fake_quant_weight_dynamic(weight, args) - - return weight - - def a_qdq(self, act, module, aquantizer): - args = {} - args['int_indices'] = module.buf_int_ids - args['fp_indices'] = module.buf_fp_ids - - if hasattr(module, 'buf_current_bit'): - args['current_bit'] = module.buf_current_bit - - act = self.aquantizer.fake_quant_act_dynamic(act, args) - - return act diff --git a/llmc/compression/quantization/rtn.py b/llmc/compression/quantization/rtn.py deleted file mode 100644 index 435b234df..000000000 --- a/llmc/compression/quantization/rtn.py +++ /dev/null @@ -1,28 +0,0 @@ -import torch -from loguru import logger - -from llmc.utils.registry_factory import ALGO_REGISTRY - -from .base_blockwise_quantization import BaseBlockwiseQuantization - - -@ALGO_REGISTRY -class RTN(BaseBlockwiseQuantization): - def __init__(self, model, quant_config, input, padding_mask, config): - super().__init__(model, quant_config, input, padding_mask, config) - - @torch.no_grad() - def block_opt(self, block, *opt_kwargs): - if self.quant_kvcache: - self.register_kv_cache(block) - if self.act_static: - super().block_opt(block, *opt_kwargs) - - @torch.no_grad() - def subset_transform( - self, - subset, - input_feat, - subset_kwargs, - ): - pass diff --git a/llmc/compression/quantization/smoothquant.py b/llmc/compression/quantization/smoothquant.py deleted file mode 100755 index 88de97b52..000000000 --- a/llmc/compression/quantization/smoothquant.py +++ /dev/null @@ -1,79 +0,0 @@ -import gc - -import torch -import torch.nn as nn -from loguru import logger - -from llmc.utils.registry_factory import ALGO_REGISTRY - -from .base_blockwise_quantization import BaseBlockwiseQuantization -from .module_utils import _LLMC_LN_TYPES_, _TRANSFORMERS_LN_TYPES_ - - -@ALGO_REGISTRY -class SmoothQuant(BaseBlockwiseQuantization): - def __init__(self, model, quant_config, input, padding_mask, config): - super().__init__(model, quant_config, input, padding_mask, config) - special_config = self.quant_config.get('special', {}) - self.alpha = 
special_config.get('alpha', 0.5) - - @torch.no_grad() - def filter_subset(self, prev_op): - if isinstance(prev_op[0], tuple(_LLMC_LN_TYPES_ + _TRANSFORMERS_LN_TYPES_)): - return True - else: - return False - - @torch.no_grad() - def get_weight_scale(self, layers): - weights = self.collect_layers_weights(layers) - scale = torch.cat( - [fc.abs().max(dim=0, keepdim=True)[0] for fc in weights], dim=0 - ) - scale = scale.max(dim=0)[0].clamp(min=1e-5) - del weights - gc.collect() - torch.cuda.empty_cache() - return scale - - @torch.no_grad() - def get_act_scale(self, tensors): - scale_max = None - for x in tensors: - x = x.cuda() - x = x.abs().view(-1, x.shape[-1]) - comming_max = torch.max(x, dim=0)[0].float() - if scale_max is not None: - scale_max = torch.max(scale_max, comming_max) - else: - scale_max = comming_max - x = x.cpu() - return scale_max - - @torch.no_grad() - def search_scale_subset(self, layers, tensors): - w_max = self.get_weight_scale(layers) - x_max = self.get_act_scale(tensors) - x_max = x_max.to(dtype=w_max.dtype, device=w_max.device) - scale = (x_max.pow(self.alpha) / w_max.pow(1 - self.alpha)).clamp(min=1e-5) - return scale - - @torch.no_grad() - def subset_transform( - self, - subset, - input_feat, - subset_kwargs, - ): - layers_dict = subset['layers'] - prev_op = subset['prev_op'] - input_name = subset['input'][0] - - if not self.filter_subset(prev_op): - logger.info('Do not transform this subset.') - return - layers = list(layers_dict.values()) - scale = self.search_scale_subset(layers, input_feat[input_name]) - self.apply_scale(scale, prev_op, layers) - if self.act_static: - self.update_input_feat(scale, input_feat, layers_dict, False) diff --git a/llmc/compression/quantization/spqr.py b/llmc/compression/quantization/spqr.py deleted file mode 100755 index a371b9356..000000000 --- a/llmc/compression/quantization/spqr.py +++ /dev/null @@ -1,398 +0,0 @@ -import copy -import functools -import math -import time -from collections import defaultdict - -import torch -import torch.nn as nn -import transformers -from loguru import logger - -from llmc.utils.registry_factory import ALGO_REGISTRY - -from .base_blockwise_quantization import BaseBlockwiseQuantization -from .module_utils import FakeQuantLinear -from .quant import IntegerQuantizer - - -@ALGO_REGISTRY -class SpQR(BaseBlockwiseQuantization): - def __init__(self, model, quant_config, input, padding_mask, config): - super().__init__(model, quant_config, input, padding_mask, config) - assert ( - self.wquantizer.granularity == 'per_group' - ), 'SpQR only supports per_group quantization' - self.dev = torch.device('cuda') - self.model_dtype = next(self.model.model.parameters()).dtype - self.add_quant_config() - self.layers_cache = {} - self.model_qparams = defaultdict() - - @torch.no_grad() - def add_quant_config(self): - special_config = self.quant_config['special'] - - self.prefix = self.model.block_name_prefix - self.true_sequential = special_config['true_sequential'] - self.actorder = special_config['actorder'] - self.percdamp = special_config['percdamp'] - self.blocksize = special_config['blocksize'] - self.relative_threshold = special_config['relative_threshold'] - self.simplified_outliers = special_config['simplified_outliers'] - - if self.wquantizer.granularity == 'per_group' and self.actorder: - self.need_perm = True - - if self.relative_threshold == 'inf': - self.relative_threshold = math.inf - - scale_config = special_config['scale'] - zero_config = special_config['zero'] - - self.quant_type = 
self.quant_config.get('quant_type', 'int-quant') - assert self.quant_type != 'float-quant', 'SPQR do not support Float quant now.' - self.scale_quantizer = IntegerQuantizer(**scale_config) - self.zero_quantizer = IntegerQuantizer(**zero_config) - self.Q = IntegerQuantizer( - self.wquantizer.bit, self.wquantizer.sym, 'per_channel', round_zp=False - ) - - @torch.no_grad() - def block_transform_true_sequential(self, block, input_feat): - - subsets = self.model.get_subsets_in_block(block) - for subset in subsets: - handles = [] - self.subset_init(subset) - - for name in subset['layers']: - handles.append( - subset['layers'][name].register_forward_hook( - functools.partial( - self.cache_input_hook, name=name, feat_dict=input_feat - ) - ) - ) - self.block_forward(block) - for h in handles: - h.remove() - torch.cuda.empty_cache() - - self.subset_transform(subset['layers']) - self.model.replace_module_subset( - FakeQuantLinear, - block, - subset, - self.block_idx, - self.get_replacement_params(mode='fake_quant', w_only=True), - ) - - @torch.no_grad() - def block_transform(self, block, input_feat, *block_kwargs): - logger.info(f'Start transform the {self.block_idx+1}-th block') - - if self.true_sequential: - self.block_transform_true_sequential(block, input_feat) - else: - layers_dict = self.model.get_block_linears(block) - self.subset_transform(layers_dict) - self.model.replace_module_block( - FakeQuantLinear, - block, - self.get_replacement_params(mode='fake_quant', w_only=True), - ) - - logger.info(f'End transform the {self.block_idx+1}-th block') - - @torch.no_grad() - def subset_transform(self, layers_dict): - for name in layers_dict: - layer = layers_dict[name] - self.layer_transform(layer, name) - self.free(name) - - @torch.no_grad() - def layer_transform(self, layer, name): - self.qparams = {} - self.columns = self.layers_cache[name]['columns'] - W = layer.weight.data.clone() - if isinstance(layer, nn.Conv2d): - W = W.flatten(1) - if isinstance(layer, transformers.Conv1D): - W = W.t() - - W = W.float() - - tick = time.time() - - self.groups = [None] * (self.columns // self.wquantizer.group_size) - - H = self.layers_cache[name]['H'] - del self.layers_cache[name]['H'] - - if self.actorder: - self.perm = torch.argsort(torch.diag(H), descending=True) - W = W[:, self.perm] - H = H[self.perm][:, self.perm] - self.invperm = torch.argsort(self.perm) - layer.register_buffer('buf_perm', self.perm) - layer.register_buffer('buf_invperm', self.invperm) - - dead = torch.diag(H) == 0 - if self.percdamp > 0: - damp = self.percdamp * abs(torch.diag(H)).mean() - diag = torch.arange(self.columns, device=self.dev) - H[diag, diag] += damp - del diag - H[dead, dead] = 1 - W[:, dead] = 0 - - Losses = torch.zeros_like(W) - tmp = torch.zeros_like(W) - - H = torch.linalg.cholesky(H) - H = torch.cholesky_inverse(H) - H = torch.linalg.cholesky(H, upper=True) - Hinv = H - mask = torch.zeros_like(W, dtype=torch.bool) - self.weight_transform(W, Hinv, Losses, tmp, mask) - - torch.cuda.synchronize() - logger.info(f'time {time.time() - tick}') - logger.info(f'error {torch.sum(Losses).item()}') - - if self.actorder: - tmp = tmp[:, self.invperm] - mask = mask[:, self.invperm] - - if isinstance(layer, transformers.Conv1D): - tmp = tmp.t() - mask = mask.t() - - assert layer.weight.shape == tmp.shape - layer.weight.data = tmp - - logger.info(f'tmp {tmp}') - logger.info(f'outliers {torch.sum(mask)} / {mask.numel()}') - - if self.wquantizer.granularity == 'per_group': - self.set_model_qparams(layer) - 
layer.register_buffer('buf_mask', mask.float().to_sparse()) - - @torch.no_grad() - def weight_transform(self, W, Hinv, Losses, tmp, mask): - def outliers(G, HinvGD): - indices = torch.arange(G.shape[1], device=G.device) - indices = indices[1:] - (indices[:, None] >= indices[1:]).to(indices.dtype) - LooG = G[:, indices] - - _, s, z, N, P = self.Q.get_tensor_qparams(LooG.flatten(0, 1)) - LooRG = self.Q.quant_dequant(LooG.flatten(0, 1), s, z, N, P).reshape( - LooG.shape - ) - LooHinvGD = HinvGD[indices] - LooError = ((LooRG - LooG) / LooHinvGD).square().sum(-1) - - _, s, z, N, P = self.Q.get_tensor_qparams(G) - BaseRG = self.Q.quant_dequant(G, s, z, N, P) - BaseError = ((BaseRG - G) / HinvGD).square().sum(dim=1, keepdim=True) - - return BaseError - LooError - - outlier_scale = (W.var(dim=0) / torch.diag(Hinv).square()).mean().item() - threshold = self.relative_threshold * outlier_scale - logger.info(f'threshold {threshold}') - - for i1 in range(0, self.columns, self.blocksize): - i2 = min(i1 + self.blocksize, self.columns) - Err1 = torch.zeros((W.shape[0], i2 - i1), device=W.device) - Losses1 = torch.zeros((W.shape[0], i2 - i1), device=W.device) - - for i in range(i1, i2): - if i % self.wquantizer.group_size == 0: - G = W[:, i: i + self.wquantizer.group_size] - - if self.simplified_outliers or threshold == math.inf: - self.get_group_qparams(G, i) - else: - HinvGD = torch.diag(Hinv)[i: i + self.wquantizer.group_size] - E = outliers(G, HinvGD) - M = (E > threshold).float() - mean = torch.sum(G * (1 - M), dim=1, keepdim=True) / torch.sum( - 1 - M, dim=1, keepdim=True - ).clamp_min(1) - newG = G * (1 - M) + mean * M - self.get_group_qparams(newG, i) - del HinvGD, E, M, mean, newG - - del G - - q = self.wquantizer.quant_dequant( - W[:, i].unsqueeze(1), - self.qparams['scales'], - self.qparams['zeros'], - self.qparams['qmax'], - self.qparams['qmin'], - ).squeeze(1) - - err = (W[:, i] - q) / Hinv[i, i] - if threshold != math.inf: - mask[:, i] = err.square() > threshold - M = mask[:, i].float() - newq = q * (1 - M) + W[:, i] * M - err = (W[:, i] - newq) / Hinv[i, i] - tmp[:, i] = W[:, i] - Losses1[:, i - i1] = err.square() - W[:, i + 1: i2] -= err.unsqueeze(1).matmul( - Hinv[i, i + 1: i2].unsqueeze(0) - ) - Err1[:, i - i1] = err - - Losses[:, i1:i2] = Losses1 - W[:, i2:] -= Err1.matmul(Hinv[i1:i2, i2:]) - - @torch.no_grad() - def cache_input_hook(self, m, inp, out, name, feat_dict): - self.add_batch(self.named_layers[name], name, inp[0].data, out.data) - - @torch.no_grad() - def add_batch(self, layer, name, inp, out): - if len(inp.shape) == 2: - inp = inp.unsqueeze(0) - tmp = inp.shape[0] - if isinstance(layer, (FakeQuantLinear, nn.Linear, transformers.Conv1D)): - if len(inp.shape) == 3: - inp = inp.reshape((-1, inp.shape[-1])) - inp = inp.t() - if isinstance(layer, nn.Conv2d): - unfold = nn.Unfold( - layer.kernel_size, - dilation=layer.dilation, - padding=layer.padding, - stride=layer.stride, - ) - inp = unfold(inp) - inp = inp.permute([1, 0, 2]) - inp = inp.flatten(1) - - self.layers_cache[name]['H'] *= self.layers_cache[name]['nsamples'] / ( - self.layers_cache[name]['nsamples'] + tmp - ) - self.layers_cache[name]['nsamples'] += tmp - inp = math.sqrt(2 / self.layers_cache[name]['nsamples']) * inp.float() - self.layers_cache[name]['H'] += inp.matmul(inp.t()) - - @torch.no_grad() - def layer_init(self, layer, name): - W = layer.weight.data.clone() - if isinstance(layer, nn.Conv2d): - W = W.flatten(1) - if isinstance(layer, transformers.Conv1D): - W = W.t() - self.layers_cache[name]['H'] = 
torch.zeros( - (W.shape[1], W.shape[1]), device=self.dev - ) - self.layers_cache[name]['nsamples'] = 0 - self.layers_cache[name]['columns'] = W.shape[1] - - @torch.no_grad() - def subset_init(self, subset): - self.named_layers = subset['layers'] - for name in self.named_layers: - self.layers_cache[name] = {} - self.layer_init(self.named_layers[name], name) - - @torch.no_grad() - def block_init(self, block): - self.named_layers = self.model.get_block_linears(block) - for name in self.named_layers: - self.layers_cache[name] = {} - self.layer_init(self.named_layers[name], name) - - @torch.no_grad() - def merge_qparams(self, qparams): - if isinstance(qparams, int): - return qparams - elif self.wquantizer.granularity == 'per_group': - qparams = torch.stack(qparams, dim=1) - qparams = qparams.reshape(-1, 1) - return qparams - - @torch.no_grad() - def get_group_qparams(self, c_tensor, idx): - """Get qparams for a group, idx is the index of a column within a - group, c_tensor is a group.""" - _, s, z, qmax, qmin = self.wquantizer.get_tensor_qparams(c_tensor) - _, ss, zs, Ps, Ns = self.scale_quantizer.get_tensor_qparams(s) - args = {} - args['scales'] = ss - args['zeros'] = zs - args['qmin'] = Ns - args['qmax'] = Ps - scales = self.scale_quantizer.fake_quant_weight_static(s.data, args) - _, sz, zz, Pz, Nz = self.zero_quantizer.get_tensor_qparams(z) - args['scales'] = sz - args['zeros'] = zz - args['qmin'] = Nz - args['qmax'] = Pz - zeros = self.zero_quantizer.fake_quant_weight_static(z.data, args) - self.qparams['scales'] = scales - self.qparams['zeros'] = zeros - self.qparams['qmax'] = qmax - self.qparams['qmin'] = qmin - qparams = copy.deepcopy(self.qparams) - self.groups[idx // self.wquantizer.group_size] = qparams - - @torch.no_grad() - def set_model_qparams(self, layer): - d = defaultdict(list) - d['scales'] = self.merge_qparams([g['scales'] for g in self.groups]) - d['zeros'] = self.merge_qparams([g['zeros'] for g in self.groups]) - for k, v in d.items(): - layer.register_buffer('buf_' + k, copy.deepcopy(v)) - layer.register_buffer('buf_qmax', torch.tensor(self.groups[0]['qmax'])) - layer.register_buffer('buf_qmin', torch.tensor(self.groups[0]['qmin'])) - - @torch.no_grad() - def free(self, name): - del self.layers_cache[name] - torch.cuda.empty_cache() - - @torch.no_grad() - def w_q(self, weight, qargs): - pass - - @torch.no_grad() - def w_qdq(self, module, wquantizer): - mask = module.buf_mask.to_dense() - weight = module.weight - out = (mask * weight).to(self.model_dtype) - if hasattr(self, 'need_perm'): - perm = module.buf_perm - weight = weight[:, perm] - - args = {} - args['scales'] = module.buf_scales - args['zeros'] = module.buf_zeros - args['qmax'] = module.buf_qmax - args['qmin'] = module.buf_qmin - - weight = wquantizer.fake_quant_weight_static(weight, args).to(self.model_dtype) - - if hasattr(self, 'need_perm'): - invperm = module.buf_invperm - weight = weight[:, invperm] - weight = (weight * (1 - mask) + out).to(self.model_dtype) - return weight - - @torch.no_grad() - def deploy(self, quant_format): - if quant_format == 'real_quant': - assert False, 'SpQR does not support real quantization' - super().deploy(quant_format) - - @torch.no_grad() - def save_model(self, path): - self.model.convert_dtype(self.model_dtype) - super().save_model(path) diff --git a/llmc/compression/quantization/tesseraq.py b/llmc/compression/quantization/tesseraq.py deleted file mode 100644 index f81f0d762..000000000 --- a/llmc/compression/quantization/tesseraq.py +++ /dev/null @@ -1,497 +0,0 @@ -import 
copy -import functools -import gc -import math -import os -import pdb -import random -from contextlib import nullcontext -from math import inf -from random import sample - -import numpy as np -import torch -import torch.nn as nn -from loguru import logger -from tqdm import tqdm - -from llmc.utils.registry_factory import ALGO_REGISTRY - -from .base_blockwise_quantization import BaseBlockwiseQuantization -from .module_utils import FakeQuantLinear, RectifiedSigmoid -from .train_utils import AvgMeter, LossFunction, NativeScalerWithGradNormCount - - -@ALGO_REGISTRY -class TesseraQ(BaseBlockwiseQuantization): - def __init__(self, model, quant_config, input, padding_mask, config): - super().__init__(model, quant_config, input, padding_mask, config) - self.add_quant_config() - - self.attention_mask = self.input['kwargs'][0].get('attention_mask') - model_type = self.config['model']['type'] - self.position_ids = ( - self.input['kwargs'][0].get('position_ids') - if model_type in ['Llama', 'Mistral', 'Qwen2'] - else None - ) - - if self.deactive_amp: - self.batch_mask = self._repeat_attention_mask() - else: - self.batch_mask = ( - self._repeat_attention_mask().float() - if self.attention_mask is not None - else None - ) - - self.dev = torch.device('cuda') - self.model_dtype = next(self.model.model.parameters()).dtype - logger.info('self model dtype: {}'.format(self.model_dtype)) - - self.sigmoid = RectifiedSigmoid(-0.1, 1.1) - - def _repeat_attention_mask(self): - if self.attention_mask is not None: - return self.attention_mask.repeat( - self.input['data'][0].shape[0], 1, 1, 1 - ).cuda() - return None - - def w_q(self, module, wquantizer): - args = {} - if self.optimize_scale: - args['output_scale_factor'] = 2 * self.sigmoid(module.buf_output_scale_factor) - if hasattr(module, 'buf_upbound_factor'): - args['upbound_factor'] = module.buf_upbound_factor - args['lowbound_factor'] = None - if hasattr(module, 'buf_lowbound_factor'): - args['lowbound_factor'] = module.buf_lowbound_factor - - return wquantizer.real_quant_weight_dynamic(module.weight.data, args) - - def add_quant_config(self): - self.prefix = self.model.block_name_prefix - self.loss_func = LossFunction(method='l2') - special_config = self.quant_config.get('special', {}) - - self.deactive_amp = special_config.get('deactive_amp', False) - self.wd = special_config.get('wd', None) - self.lr = special_config.get('lr', None) - self.iterations = special_config.get('iterations', 0) - self.batch_size = special_config.get('batch_size', 1) - self.optimize_scale = special_config.get('optimize_scale', False) - self.thresholds = special_config.get('thresholds', []) - self.load_transform = special_config.get('load_transform', False) - self.reduce_memory = special_config.get('reduce_memory', False) - - if self.load_transform: - assert 'scale_path' in special_config, \ - 'scale_path must be specified when load_transform is True' - self.scale_path = special_config['scale_path'] - self.act_scales = torch.load(os.path.join(self.scale_path, 'scales.pth'), - map_location='cpu') - for k in self.act_scales: - self.act_scales[k] = self.act_scales[k].to(torch.float32) - - self.scale_lr = special_config.get('scale_lr', None) - - if self.deactive_amp: - self.dtype = torch.float - self.traincast = nullcontext - else: - self.dtype = torch.bfloat16 - self.traincast = torch.cuda.amp.autocast - - self.aug_loss = special_config.get('aug_loss', None) - - if self.weight_clip and self.clip_version == 'v2': - self.wquantizer.calib_algo = 'learnable' - self.clip_path = 
special_config.get('clip_path', None) - if self.clip_path: - self.weight_clips = torch.load(os.path.join(self.clip_path, 'clips.pth'), - map_location='cpu') - - self.change_ratio = {} - - def block_forward(self, block, input_data=None): - output = [] - - if input_data is None: - input_data = self.input['data'] - - for i in range(len(input_data)): - input_data[i] = input_data[i].to(device=next(block.parameters()).device) - if ( - 'attention_mask' in self.input['kwargs'][i] - and self.input['kwargs'][i]['attention_mask'] is not None - ): - self.input['kwargs'][i]['attention_mask'] = self.input['kwargs'][i][ - 'attention_mask' - ].cuda() - with torch.no_grad(): - with torch.cuda.amp.autocast(): - out = block(input_data[i], **self.input['kwargs'][i])[0] - output.append(out) - return output - - def get_original_out(self, block): - if self.block_idx == 0: - self.ori_out = self.block_forward(block) - if self.aug_loss: - self.ori_out2 = self.ori_out - else: - self.ori_out = self.block_forward(block, self.ori_out) - if self.aug_loss: - self.ori_out2 = self.block_forward(block) - - @torch.no_grad() - def collect_block_qparams(self, block, input_feat): - named_linears = self.model.get_block_linears(block) - for n, m in named_linears.items(): - args = {} - if hasattr(m, 'buf_lowbound_factor'): - args['lowbound_factor'] = m.buf_lowbound_factor - if hasattr(m, 'buf_upbound_factor'): - args['upbound_factor'] = m.buf_upbound_factor - ( - tensor, - scales, - zeros, - max_int, - min_int, - ) = self.wquantizer.get_tensor_qparams(m.weight.data, args=args) - m.register_buffer('buf_scales', scales) - m.register_buffer('buf_zeros', zeros) - m.register_buffer('buf_qmax', torch.tensor(max_int).to(self.dev)) - m.register_buffer('buf_qmin', torch.tensor(min_int).to(self.dev)) - - if self.act_static: - subsets = self.model.get_subsets_in_block(block) - for index, subset in enumerate(subsets): - layers_dict = subset['layers'] - input_name = subset['input'][0] - input_tensors = copy.deepcopy(input_feat[input_name]) - self.register_act_qparams(layers_dict, input_tensors) - del input_tensors - - @torch.no_grad() - def block_transform(self, block, input_feat, block_kwargs): - logger.info(f'Start transform the {self.block_idx+1}-th block') - - with torch.no_grad(): - block.float() - - if self.online_rotate: - self.replace_rotate_linears(block) - - for i in range(len(self.input['data'])): - self.input['data'][i] = self.input['data'][i].to(self.dtype) - self.get_original_out(block) # collect block output - - if self.load_transform: - self.tesseraq_load_transform(block, input_feat) - if self.weight_clip: - self.tesseraq_weight_clip(block, input_feat) - - self.collect_block_qparams(block, input_feat) # collect quant range after transformation - self.register_tesseraq_parameters(block) - - self.tesseraq_train(block) - self.merge_tesseraq_parameters_and_clear_tmp(block) - self.set_rounding_opt_mode(block, on=False) - - # convert it back to original dtype - if self.reduce_memory: - block.to(self.model_dtype) - - logger.info(f'End transform the {self.block_idx+1}-th block') - - def tesseraq_train(self, block): - self.set_dynamic_tmp_quant(block, on=True) - for n, p in block.named_parameters(): - p.requires_grad = False - - thresholds = self.thresholds - self.input['data'] = torch.cat(self.input['data'], dim=0) - self.ori_out = torch.cat(self.ori_out, dim=0) - - # evaluate loss before reconstruction - with torch.no_grad(): - with torch.cuda.amp.autocast(): - loss_prev = self.get_tesseraq_loss( - block, self.input['data'][:4], 
self.ori_out[:4] - ) - logger.info( - 'Before TesseraQ, the reconstruction loss: {}'.format(loss_prev.item()) - ) - - for i in range(len(thresholds)): - self.set_rounding_opt_mode(block, on=True) - self.update_mask(block, quantile_threshold=thresholds[i]) - - params_r, params_s = self.get_rounding_parameters(block) - if self.optimize_scale: - optimizer = torch.optim.Adam( - [ - {'params': params_r, 'lr': self.lr}, - { - 'params': params_s, - 'lr': self.scale_lr or self.lr, - 'weight_decay': 1e-4, - }, - ], - lr=self.lr, - ) - else: - optimizer = torch.optim.Adam(params_r, self.lr) - - loss_scaler = NativeScalerWithGradNormCount() - - with torch.enable_grad(): - for p in params_r + params_s: - p.requires_grad = True - - for iters in range(self.iterations): - indices = torch.randperm(self.config['calib']['n_samples'])[ - : self.batch_size - ] - - with self.traincast(): - target2 = self.ori_out2[indices] if self.aug_loss else None - loss = self.get_tesseraq_loss( - block, - self.input['data'][indices], - self.ori_out[indices], - target2, - ) - - if not math.isfinite(loss.item()): - logger.info('Loss is NAN, stopping training') - pdb.set_trace() - - optimizer.zero_grad() - - norm = loss_scaler(loss, optimizer, parameters=params_r + params_s) - - logger.info( - f'block {self.block_idx} iter {i+1} loss:{loss.item():5f} \ - norm:{norm.item():4f} HR progress:{(1-thresholds[i])*100:1f}% ' - ) - for p in params_r + params_s: - p.requires_grad = False - - del optimizer - - for n, m in block.named_modules(): - if isinstance(m, FakeQuantLinear): - # set to hard masking - m.buf_rounding = 100 * m.buf_rounding.sign() - - with torch.no_grad(): - with torch.cuda.amp.autocast(): - loss_now = self.get_tesseraq_loss( - block, self.input['data'][:4], self.ori_out[:4] - ) - self.low_now = loss_now.item() - logger.info( - 'After TesseraQ, the reconstruction loss: {}'.format(loss_now.item()) - ) - - self.input['data'] = list( - torch.split(self.input['data'], split_size_or_sections=1, dim=0) - ) - self.ori_out = list(torch.split(self.ori_out, split_size_or_sections=1, dim=0)) - - @torch.no_grad() - def tesseraq_load_transform(self, block, input_feat): - logger.info('loading scales...') - subsets = self.model.get_subsets_in_block(block) - for index, subset in enumerate(subsets): - prev_op = subset['prev_op'] - layers_dict = subset['layers'] - layers = list(layers_dict.values()) - - if ( - isinstance(prev_op[0], (nn.Linear, FakeQuantLinear)) - and prev_op[0].out_features != layers[0].in_features * 3 - and prev_op[0].out_features != layers[0].in_features - ): - logger.info('Cannot apply scale. 
Do not transform this subset.') - continue - - for n in layers_dict: - layer_name = f'{self.model.block_name_prefix}.{self.block_idx}.{n}' - scale = self.act_scales[layer_name].cuda() - self.apply_scale(scale, prev_op, layers) - self.update_input_feat(scale, input_feat, layers_dict) - - @torch.no_grad() - def update_input_feat(self, scale, input_feat, layers_dict): - for layer_name in layers_dict: - for i in range(len(input_feat[layer_name])): - inp = input_feat[layer_name][i] - inp.div_(scale.view(1, -1).to(inp.device)) - - def tesseraq_weight_clip(self, block, input_feat): - if self.clip_version == 'v1': - self.auto_clipper.run(block, self.block_idx, input_feat, n_sample_token=512) - elif self.clip_version == 'v2': - logger.info('loading clips...') - for n, m in block.named_modules(): - if isinstance(m, nn.Linear): - if any([_ in n for _ in ['q_', 'k_', 'query', 'key', 'Wqkv']]): - m.register_buffer('buf_upbound_factor', None) - m.register_buffer('buf_lowbound_factor', None) - continue - layer_name = f'{n}.weight_quantizer.' - upbound_factor = self.weight_clips[self.block_idx][ - layer_name + 'upbound_factor' - ] - lowbound_factor = self.weight_clips[self.block_idx][ - layer_name + 'lowbound_factor' - ] - m.register_buffer( - 'buf_upbound_factor', - upbound_factor.cuda().float(), - ) - m.register_buffer( - 'buf_lowbound_factor', - lowbound_factor.cuda().float() - if lowbound_factor is not None - else None, - ) - - def get_tesseraq_loss(self, block, x, target, target2=None): - if self.position_ids is not None: - quant_out = block( - x, attention_mask=self.batch_mask, position_ids=self.position_ids - )[0] - else: - quant_out = block(x, attention_mask=self.batch_mask)[0] - - loss = self.loss_func(target, quant_out) - if target2 is not None: - loss = (loss + self.loss_func(target2, quant_out)) / 2 - return loss - - def register_tesseraq_parameters(self, block): - module = FakeQuantLinear - self.model.replace_module_block( - module, - block, - self.block_idx, - self.get_replacement_params( - mode='fake_quant', w_only=self.w_only, name=None - ), - ) - self.register_rounding_parameters(block) - - def register_rounding_parameters(self, block): - for n, m in block.named_modules(): - if isinstance(m, FakeQuantLinear): - rounding = m.weight.data.clone() - scales = m.buf_scales - rounding = self.wquantizer.reshape_tensor(rounding).div(scales) - rounding = rounding - torch.floor(rounding) - rounding = self.sigmoid.inverse(rounding) - - m.register_buffer('buf_rounding', rounding) - - if self.optimize_scale: - m.register_buffer('buf_output_scale_factor', torch.zeros_like(scales)) - - @torch.no_grad() - def update_mask(self, block, quantile_threshold): - for n, m in block.named_modules(): - if isinstance(m, FakeQuantLinear): - score = (self.sigmoid(m.buf_rounding) - 0.5).abs().cpu() - value = np.quantile(score.numpy(), q=quantile_threshold) - m.buf_rounding[self.sigmoid(m.buf_rounding) > (value + 0.5)] = float('inf') - m.buf_rounding[self.sigmoid(m.buf_rounding) < (0.5 - value)] = -float('inf') - del score - - def set_rounding_opt_mode(self, block, on=True): - for n, m in block.named_modules(): - if isinstance(m, FakeQuantLinear): - if not hasattr(m, 'buf_rounding_opt'): - m.register_buffer('buf_rounding_opt', torch.tensor(on)) - else: - m.buf_rounding_opt = torch.tensor(on) - - def set_dynamic_tmp_quant(self, block, on=True): - for n, m in block.named_modules(): - if isinstance(m, FakeQuantLinear): - m.dynamic_quant_tmp_weight = on - - def get_rounding_parameters(self, block): - params_r = [] - 
params_s = [] - for n, m in block.named_modules(): - if isinstance(m, FakeQuantLinear): - params_r += [m.buf_rounding] - if self.optimize_scale: - params_s += [m.buf_output_scale_factor] - return params_r, params_s - - def merge_tesseraq_parameters_and_clear_tmp(self, block): - for n, m in block.named_modules(): - if isinstance(m, FakeQuantLinear): - m.buf_rounding = (m.buf_rounding > 0).float() - w_shape = m.weight.shape - W = self.wquantizer.reshape_tensor(m.weight.data) / m.buf_scales - m.buf_rounding = m.buf_rounding - (W - torch.floor(W) > 0.5).float() - - cr = torch.count_nonzero(m.buf_rounding) / m.buf_rounding.numel() - if n not in self.change_ratio: - self.change_ratio[n] = 0 - self.change_ratio[n] = self.change_ratio[n] + cr - logger.info('layer {}, change ratio: {}%' - .format(n, self.change_ratio[n] / (self.block_idx + 1))) - m.buf_rounding *= 0.5 * m.buf_scales - m.buf_rounding = self.wquantizer.restore_tensor(m.buf_rounding, w_shape) - m.weight.data.add_(m.buf_rounding.to(self.model_dtype)) - - delattr(m, 'buf_rounding') - delattr(m, 'tmp_weight') - delattr(m, 'tmp_bias') - m.dynamic_quant_weight = False - m.dynamic_quant_tmp_weight = False - - gc.collect() - torch.cuda.empty_cache() - - def cache_input_hook(self, m, x, y, name, feat_dict): - super(TesseraQ, self).cache_input_hook(m, x, y, name, feat_dict) - if len(feat_dict[name]) > 128: - del feat_dict[name][-1] - - def w_qdq(self, module, wquantizer): - weight = module.weight - - args = {} - args['scales'] = module.buf_scales - if hasattr(module, 'buf_zeros'): - args['zeros'] = module.buf_zeros - else: - args['zeros'] = None - args['qmax'] = module.buf_qmax - args['qmin'] = module.buf_qmin - - if hasattr(module, 'buf_rounding_opt') and module.buf_rounding_opt: - args['rounding'] = self.sigmoid(module.buf_rounding) - - if self.optimize_scale: - args['output_scale_factor'] = 2 * self.sigmoid(module.buf_output_scale_factor) - - weight = wquantizer.fake_quant_weight_static(weight, args) - - return weight - - def deploy(self, quant_format): - super().deploy(quant_format) - self.model.convert_dtype(self.model_dtype) - - def save_model(self, path): - self.model.convert_dtype(self.model_dtype) - super().save_model(path) diff --git a/llmc/compression/quantization/train_utils.py b/llmc/compression/quantization/train_utils.py deleted file mode 100644 index 8496920fd..000000000 --- a/llmc/compression/quantization/train_utils.py +++ /dev/null @@ -1,132 +0,0 @@ -import os -import sys -import time -from math import inf - -import torch -import torch.nn as nn -from loguru import logger - - -class AvgMeter: - def __init__(self): - self.num = 0 - self.s = 0 - self.m = 0 - - def update(self, value): - self.num += 1 - prev = value - self.m - self.m = self.m + (value - self.m) / self.num - now = value - self.m - self.s = self.s + prev * now - - def get(self): - # assert self.num > 1 - return round(self.m, 4), round(self.s / (self.num - 1), 5) - - -class TruncateFunction(torch.autograd.Function): - @staticmethod - def forward(ctx, input, threshold): - truncated_tensor = input.clone() - truncated_tensor[truncated_tensor.abs() < threshold] = ( - truncated_tensor[truncated_tensor.abs() < threshold].sign() * threshold - ) - return truncated_tensor - - @staticmethod - def backward(ctx, grad_output): - grad_input = grad_output.clone() - return grad_input, None - - -class LossFunction: - def __init__(self, method='mse', reduction='mean', dim=0): - self.method = method - self.reduction = reduction - self.dim = dim - - def l2_loss(self, x, y): - return 
(x - y).pow(2).sum(-1).mean() - - def __call__(self, f_out, q_out): - # L2 Loss - if self.method == 'l2': - return self.l2_loss(f_out, q_out) - - # MSE Loss - elif self.method == 'mse': - mse_loss = nn.MSELoss(reduction=self.reduction) - return mse_loss(f_out, q_out) - - # Distribution Loss - elif self.method == 'dist': - mse_loss = nn.MSELoss(reduction=self.reduction) - - channel_num = f_out.shape[-1] - f_out = f_out.reshape(-1, channel_num) - q_out = q_out.reshape(-1, channel_num) - - mean_error = mse_loss(f_out.mean(dim=self.dim), q_out.mean(dim=self.dim)) - std_error = mse_loss(f_out.std(dim=self.dim), q_out.std(dim=self.dim)) - return mean_error + std_error - - # KL divergence Loss - elif self.method == 'kl': - kl_loss = nn.KLDivLoss(reduction=self.reduction) - return kl_loss(f_out, q_out) - - -class NativeScalerWithGradNormCount: - def __init__(self): - self._scaler = torch.cuda.amp.GradScaler() - - def __call__( - self, - loss, - optimizer, - clip_grad=None, - parameters=None, - create_graph=False, - update_grad=True, - retain_graph=False, - ): - self._scaler.scale(loss).backward( - create_graph=create_graph, retain_graph=retain_graph - ) - if update_grad: - if clip_grad is not None: - assert parameters is not None - self._scaler.unscale_(optimizer) - norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad) - else: - self._scaler.unscale_(optimizer) - norm = self.ampscaler_get_grad_norm(parameters) - self._scaler.step(optimizer) - self._scaler.update() - else: - norm = None - return norm - - def ampscaler_get_grad_norm(self, parameters, norm_type=2.0): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = [p for p in parameters if p.grad is not None] - norm_type = float(norm_type) - if len(parameters) == 0: - return torch.tensor(0.0) - device = parameters[0].grad.device - if norm_type == inf: - total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters) - else: - total_norm = torch.norm( - torch.stack( - [ - torch.norm(p.grad.detach(), norm_type).to(device) - for p in parameters - ] - ), - norm_type, - ) - return total_norm diff --git a/llmc/compression/quantization/utils.py b/llmc/compression/quantization/utils.py deleted file mode 100755 index 588bf3e99..000000000 --- a/llmc/compression/quantization/utils.py +++ /dev/null @@ -1,17 +0,0 @@ -import torch - - -def make_divisible(c, divisor): - return (c + divisor - 1) // divisor - - -def is_fp8_supported_gpu(): - if not torch.cuda.is_available(): - return False - compute_capability = torch.cuda.get_device_capability(0) - major, minor = compute_capability - return (major == 8 and minor == 9) or (major >= 9) - - -def ceil_div(x, y): - return (x + y - 1) // y diff --git a/llmc/compression/sparsification/__init__.py b/llmc/compression/sparsification/__init__.py deleted file mode 100644 index acff99218..000000000 --- a/llmc/compression/sparsification/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .base_blockwise_sparsification import BaseBlockwiseSparsification -from .dense import Dense -from .kvsparse import ShadowKVCache, SinkKVCache -from .magnitude import Magnitude -from .shortgpt import ShortGPT -from .wanda import Wanda diff --git a/llmc/compression/sparsification/attn_utils.py b/llmc/compression/sparsification/attn_utils.py deleted file mode 100644 index dfec520ca..000000000 --- a/llmc/compression/sparsification/attn_utils.py +++ /dev/null @@ -1,179 +0,0 @@ -import torch -import torch.nn as nn -from loguru import logger -from transformers.models.llama.modeling_llama import 
(apply_rotary_pos_emb, - repeat_kv) - - -def _update_causal_mask(causal_mask, attention_mask, input_tensor, past_seen_tokens): - - batch_size, seq_length = input_tensor.shape[:2] - dtype = input_tensor.dtype - device = input_tensor.device - - # We use the current dtype to avoid any overflows - min_dtype = torch.finfo(dtype).min - causal_mask = causal_mask[None, None, :, :].to(dtype=dtype, device=device) * min_dtype - causal_mask = causal_mask.expand(batch_size, 1, -1, -1) - if attention_mask is not None: - causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit - if attention_mask.dim() == 2: - mask_length = attention_mask.shape[-1] - padding_mask = causal_mask[..., :mask_length].eq(0.0)\ - * attention_mask[:, None, None, :].eq(0.0) - causal_mask[..., :mask_length] = \ - causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype) - elif attention_mask.dim() == 4: - # backwards compatibility: we allow passing a - # 4D attention mask shorter than the input length with - # cache. In that case, the 4D attention mask attends to the newest tokens only. - if attention_mask.shape[-2] < past_seen_tokens + input_tensor.shape[1]: - offset = past_seen_tokens - else: - offset = 0 - mask_shape = attention_mask.shape - mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype - causal_mask[ - : mask_shape[0], : mask_shape[1], - offset: mask_shape[2] + offset, : mask_shape[3]] = mask_slice - - return causal_mask - - -def eager_attention_forward( - module, - query, - key, - value, - attention_mask, - scaling, - dropout, - **kwargs, -): - key_states = repeat_kv(key, module.num_key_value_groups) - value_states = repeat_kv(value, module.num_key_value_groups) - - attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling - if attention_mask is not None: - causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] - attn_weights = attn_weights + causal_mask - - attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) - attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) - attn_output = torch.matmul(attn_weights, value_states) - attn_output = attn_output.transpose(1, 2).contiguous() - - return attn_output, attn_weights - - -class ShadowKVAttention(nn.Module): - def __init__(self, module): - super().__init__() - self.config = module.config - self.layer_idx = module.layer_idx - self.head_dim = module.head_dim - self.num_key_value_groups = module.num_key_value_groups - self.scaling = self.head_dim**-0.5 - self.attention_dropout = module.attention_dropout - self.is_causal = True - - self.q_proj = module.q_proj - self.k_proj = module.k_proj - self.v_proj = module.v_proj - self.o_proj = module.o_proj - - def forward( - self, - hidden_states, - position_embeddings, - position_ids, - attention_mask, - past_key_value, - output_attentions, - use_cache, - cache_position, - retrieval_position_ids=None, - cos_sin_cache=None, - **kwargs, - ): - - bsz, q_len, _ = hidden_states.size() - - query_states = self.q_proj(hidden_states) - key_states = self.k_proj(hidden_states) - value_states = self.v_proj(hidden_states) - - query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) - key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) - value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) - - if past_key_value is not None and past_key_value.prefill: - past_key_value.get_svd(key_states, layer_idx=self.layer_idx) - - cos, sin = 
position_embeddings - query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) - - if past_key_value is not None: - key_states, value_states = \ - past_key_value.update(key_states, - value_states, - self.layer_idx, - retrieval_position_ids, - cos_sin_cache) - - key_states = repeat_kv(key_states, self.num_key_value_groups) - value_states = repeat_kv(value_states, self.num_key_value_groups) - - causal_mask = attention_mask - if attention_mask is not None: - causal_mask = causal_mask[:, :, :, : key_states.shape[-2]] - - # SDPA with memory-efficient backend is currently (torch==2.1.2) - # bugged with non-contiguous inputs with custom attn_mask, - # Reference: https://github.com/pytorch/pytorch/issues/112577. - if query_states.device.type == 'cuda' and causal_mask is not None: - query_states = query_states.contiguous() - key_states = key_states.contiguous() - value_states = value_states.contiguous() - - # We dispatch to SDPA's Flash Attention or Efficient kernels - # via this `is_causal` if statement instead of an inline conditional assignment - # in SDPA to support both torch.compile's dynamic shapes and full graph options. - # An inline conditional prevents dynamic shapes from compiling. - is_causal = True if causal_mask is None and q_len > 1 else False - - attn_output = torch.nn.functional.scaled_dot_product_attention( - query_states, - key_states, - value_states, - attn_mask=causal_mask, - dropout_p=self.attention_dropout if self.training else 0.0, - is_causal=is_causal, - ) - - attn_output = attn_output.transpose(1, 2).contiguous() - attn_output = attn_output.view(bsz, q_len, -1) - - attn_output = self.o_proj(attn_output) - - return attn_output, None, past_key_value - - @classmethod - @torch.no_grad() - def new(cls, module): - new_module = cls(module) - return new_module - - def __repr__(self): - return ( - f'ShadowKVAttention(\n' - f' (q_proj): {self.q_proj}\n' - f' (k_proj): {self.k_proj}\n' - f' (v_proj): {self.v_proj}\n' - f' (o_proj): {self.o_proj}\n' - f' (kvcache): {self.kvcache}\n' - f')' - ) - - -_LLMC_ATTN_MAP_ = {'ShadowKV': {'Llama': ShadowKVAttention}} diff --git a/llmc/compression/sparsification/base_blockwise_sparsification.py b/llmc/compression/sparsification/base_blockwise_sparsification.py deleted file mode 100644 index 42d73dfa2..000000000 --- a/llmc/compression/sparsification/base_blockwise_sparsification.py +++ /dev/null @@ -1,204 +0,0 @@ -import functools -import gc -from collections import defaultdict - -import torch -from loguru import logger - -from llmc.utils import copy_files -from llmc.utils.registry_factory import KV_REGISTRY - -from ..blockwise_optimization import BlockwiseOpt -from .attn_utils import _LLMC_ATTN_MAP_ - - -class BaseBlockwiseSparsification(BlockwiseOpt): - def __init__(self, model, sparsity_config, input, padding_mask, config): - super().__init__(model, sparsity_config, input, padding_mask, config) - self.set_sparsity_config() - - def block_init(self, block): - pass - - def set_sparsity_config(self): - if 'sparsity_out' in self.sparsity_config and self.sparsity_config[ - 'sparsity_out' - ]: - self.sparsity_out = True - else: - self.sparsity_out = False - logger.info(f'use sparsity_out {self.sparsity_out}') - - # set kv cache sparse config - if 'kvcache' in self.sparsity_config: - self.sparse_kvcache = True - self.set_kv_sparse_config() - else: - self.sparse_kvcache = False - - if 'weight' in self.sparsity_config: - if 'sparsity' in self.sparsity_config['weight']: - self.sparsity = 
self.sparsity_config['weight']['sparsity'] - self.W_mask = None - elif 'n_prune_layers' in self.sparsity_config['weight']: - self.n_prune_layers = self.sparsity_config['weight']['n_prune_layers'] - - def set_kv_sparse_config(self): - kv_sparse_config = {} - if self.sparsity_config['kvcache']['method'] == 'ShadowKV': - assert self.config['model']['type'] in ['Llama'] - assert self.config['eval'].get('type', None) != 'decode_ppl' - inv_freq = \ - self.model.model.model.layers[0].self_attn.rotary_emb.inv_freq.cuda() - cos_cache, sin_cache = self.set_cos_sin_cache(inv_freq) - self.cos_sin_cache = (cos_cache, sin_cache) - kv_sparse_config['config'] = self.model.model_config - elif self.sparsity_config['kvcache']['method'] == 'SinkKV': - kv_sparse_config['num_hidden_layers'] = self.model.model_config.num_hidden_layers - kv_sparse_config['window_length'] = self.sparsity_config['kvcache']['window_length'] - kv_sparse_config['num_sink_tokens'] = self.sparsity_config['kvcache']['num_sink_tokens'] - self.kv_module = KV_REGISTRY[self.sparsity_config['kvcache']['method']](**kv_sparse_config) - self.replace_attn = self.sparsity_config['kvcache'].get('replace_attn', False) - self.model.kvcache_buffer.append(self.kv_module) - - def set_cos_sin_cache(self, inv_freq): - max_length = 64 * 1024 - t = torch.arange(max_length + 1024, device=torch.device('cuda'), dtype=inv_freq.dtype) - freqs = torch.outer(t, inv_freq) - emb = torch.cat((freqs, freqs), dim=-1) - return emb.cos().to(torch.bfloat16), emb.sin().to(torch.bfloat16) - - @torch.no_grad() - def register_kv_cache(self, block): - attn_layers_dict = self.model.get_attn_in_block(block) - attn_layer = attn_layers_dict[list(attn_layers_dict.keys())[0]] - setattr(attn_layer, 'kvcache', self.kv_module) - attn_layer.register_forward_pre_hook( - self.kv_cache_input_hook(attn_layer), with_kwargs=True - ) - - def replace_attention(self, block): - attn_layers_dict = self.model.get_attn_in_block(block) - layers_dict = {'layers': attn_layers_dict} - kv_method = self.sparsity_config['kvcache']['method'] - model_type = self.config['model']['type'] - attn_module = _LLMC_ATTN_MAP_[kv_method][model_type] - self.model.replace_module_subset( - attn_module, - block, - layers_dict, - self.block_idx, - {} - ) - - def block_forward(self, block, input_data=None): - output = [] - if input_data is None: - input_data = self.input['data'] - - for i in range(len(input_data)): - input_data[i] = input_data[i].to(device=next(block.parameters()).device) - if 'attention_mask' in self.input[ - 'kwargs' - ][i] and self.input['kwargs'][i]['attention_mask'] is not None: - self.input['kwargs'][i]['attention_mask'] = self.input['kwargs'][i][ - 'attention_mask' - ].cuda() - with torch.no_grad(): - out = block(input_data[i], **self.input['kwargs'][i])[0] - output.append(out) - return output - - def block_opt(self, block): - if self.sparse_kvcache: - if self.replace_attn: - self.replace_attention(block) - self.register_kv_cache(block) - block = block.cuda() - - if not self.data_free: - named_linears = self.model.get_block_linears(block) - logger.info(f'named_linears: {named_linears}') - input_feat = defaultdict(list) - handles = [] - self.block_init(block) - - for name in named_linears: - handles.append( - named_linears[name].register_forward_hook( - functools.partial( - self.cache_input_hook, name=name, feat_dict=input_feat - ) - ) - ) - - if not self.sparsity_out: - self.input['data'] = self.block_forward(block) - else: - self.block_forward(block) - for h in handles: - h.remove() - 
torch.cuda.empty_cache() - - self.block_transform(block, input_feat, self.input['kwargs']) - - if self.sparsity_out: - self.input['data'] = self.block_forward(block) - - block = block.cpu() - del input_feat - gc.collect() - torch.cuda.empty_cache() - - else: - self.block_transform(block) - - def block_transform(self, block, input_feat, block_kwargs): - logger.info(f'Start transform the {self.block_idx+1}-th block') - subsets = self.model.get_subsets_in_block(block) - for index, subset in enumerate(subsets): - if not self.filter_subset(subset): - continue - # logger.info(f"subset: {subset}") - prev_op = subset['prev_op'] - layers_dict = subset['layers'] - input_name = subset['input'][0] - inspect_module = subset['inspect'] - inspect_has_kwargs = subset['has_kwargs'] - subset_kwargs = block_kwargs if inspect_has_kwargs else {} - self.subset_transform( - layers_dict, - input_feat, - prev_op, - input_name, - inspect_module, - subset_kwargs - ) - logger.info(f'End transform the {self.block_idx+1}-th block') - - def filter_subset(self, subset): - return True - - @torch.no_grad() - def deploy(self, deploy_format): - logger.info('-- deploy_sparsity_model start --') - logger.info(f'sparsity_config : {self.sparsity_config}') - logger.info('-- deploy_sparsity_model done --') - - @torch.no_grad() - def copy_tokenizer(self, path): - self.model.tokenizer.save_pretrained(path) - logger.info('copy tokenizer done --') - - @torch.no_grad() - def save_model(self, path): - if self.config.model.type == 'Llava': - self.model.llava_model.language_model = self.model.get_model() - self.model.llava_model.save_pretrained(path) - logger.info('save model done --') - self.copy_tokenizer(path) - copy_files(self.config.model.path, path, 'preprocessor_config') - else: - self.model.get_model().save_pretrained(path) - logger.info('save model done --') - self.copy_tokenizer(path) diff --git a/llmc/compression/sparsification/dense.py b/llmc/compression/sparsification/dense.py deleted file mode 100644 index dad9612b8..000000000 --- a/llmc/compression/sparsification/dense.py +++ /dev/null @@ -1,16 +0,0 @@ -from loguru import logger - -from llmc.utils.registry_factory import ALGO_REGISTRY - -from .base_blockwise_sparsification import BaseBlockwiseSparsification - - -@ALGO_REGISTRY -class Dense(BaseBlockwiseSparsification): - def __init__(self, model, sparsity_config, input, padding_mask, config): - super().__init__(model, sparsity_config, input, padding_mask, config) - - def block_transform(self, block): - logger.info(f'Start transform the {self.block_idx+1}-th block') - logger.info(block) - logger.info(f'End transform the {self.block_idx+1}-th block') diff --git a/llmc/compression/sparsification/kvsparse.py b/llmc/compression/sparsification/kvsparse.py deleted file mode 100644 index b251ae47f..000000000 --- a/llmc/compression/sparsification/kvsparse.py +++ /dev/null @@ -1,653 +0,0 @@ - -import math - -import torch -import torch.nn as nn -from loguru import logger -from transformers import DynamicCache - -from llmc.utils.registry_factory import KV_REGISTRY - - -def apply_rotary_pos_emb_single(q, cos, sin, position_ids, unsqueeze_dim=1): - # if position_ids shape is (batch_size, num_heads, seq_len), - # then reshape it to (batch_size*num_heads, seq_len) - if len(position_ids.shape) == 3: - position_ids = position_ids.view(-1, position_ids.size(-1)) - cos = cos[position_ids] - sin = sin[position_ids] - q_embed = (q * cos) + (rotate_half(q) * sin) - - else: - cos = cos[position_ids].unsqueeze(unsqueeze_dim) - sin = 
sin[position_ids].unsqueeze(unsqueeze_dim) - q_embed = (q * cos) + (rotate_half(q) * sin) - return q_embed - - -def rotate_half(x): - x1 = x[..., : x.shape[-1] // 2] - x2 = x[..., x.shape[-1] // 2:] - return torch.cat((-x2, x1), dim=-1) - - -@KV_REGISTRY.register('ShadowKV') -class ShadowKVCache(DynamicCache): - """ShadowKV, only for accuracy measurement and understanding, not for - efficiency, please refer to ShadowKV_CPU for the efficient - implementation.""" - - def __init__( - self, - config, - batch_size=1, - max_length=32 * 1024, - device='cuda:0', - dtype=torch.bfloat16, - sparse_budget=1024, - chunk_size=8, - rank=160, - outlier_chunk=48 - ): - - super().__init__() - self.config = config - self.batch_size = batch_size - self.max_length = max_length - self.device = device - self.dtype = dtype - self.num_key_value_groups = ( - config.num_attention_heads // config.num_key_value_heads - ) - self.head_dim = config.hidden_size // config.num_attention_heads - self.num_attention_heads = config.num_attention_heads - self.num_key_value_heads = config.num_key_value_heads - - self.sparse_budget = int(sparse_budget) - self.chunk_size = chunk_size - self.rank = rank - self.local_chunk = 4 - self.outlier_chunk = outlier_chunk - - assert self.batch_size == 1, 'ShadowKV class only supports batch_size=1' - - self.selected_chunk_idx = torch.zeros( - config.num_hidden_layers, - batch_size, - config.num_key_value_heads, - self.sparse_budget // self.chunk_size, - device=self.device, - dtype=torch.long, - ) - - self.v_cache_cpu = torch.zeros( - config.num_hidden_layers, - batch_size, - config.num_key_value_heads, - self.max_length, - self.config.hidden_size // self.config.num_attention_heads, - device=self.device, - dtype=self.dtype, - ) - - self.k_cache_buffer = torch.zeros( - config.num_hidden_layers, - batch_size, - config.num_key_value_heads, - self.sparse_budget + 4096, - self.config.hidden_size // self.config.num_attention_heads, - device=self.device, - dtype=self.dtype, - ) - - self.v_cache_buffer = torch.zeros( - config.num_hidden_layers, - batch_size, - config.num_key_value_heads, - self.sparse_budget + 4096, - self.config.hidden_size // self.config.num_attention_heads, - device=self.device, - dtype=self.dtype, - ) - - self.num_layers = config.num_hidden_layers - self.kv_offset = 0 - self.prefill = 0 - self.gen_offset = 0 - - self.k_landmark = None - self.k_landmark_idx = None - self.U = None - self.SV = None - - self.copy_stream = torch.cuda.Stream() - self.prefill = True - self.prefill_layers = 0 - - def update( - self, - key_states, - value_states, - layer_idx, - retrieval_position_ids, - cos_sin_cache, - ): - # Update the cache - if self.prefill_layers == layer_idx: - # Prefill - self.prefill_kv_cache(value_states, layer_idx, key_states) - self.prefill_layers += 1 - if layer_idx == self.num_layers - 1: - self.prefill = False - self.prefill_layers = -1 - return key_states, value_states - else: - # Decode - self.update_kv_cache(key_states, value_states, layer_idx) - value_states = self.get_value_cache(layer_idx, retrieval_position_ids) - key_states = self.get_key_cache( - layer_idx, retrieval_position_ids, cos_sin_cache - ) - - return key_states, value_states - - def _reset_states(self): - self.k_cache_buffer.zero_() - self.v_cache_buffer.zero_() - self.selected_chunk_idx.zero_() - self.k_landmark = None - self.k_landmark_idx = None - self.U = None - self.SV = None - - self.kv_offset = 0 - self.prefill = 0 - self.gen_offset = 0 - self.prefill_local = 0 - - self.key_cache = [] - 
self.value_cache = [] - self._seen_tokens = 0 - self.prefill = True - self.prefill_layers = 0 - - def get_seq_length(self, layer_idx=0): - return self.kv_offset - - def get_svd(self, new_k_cache, layer_idx): - # [bsz, 8, prefill, 128] OR [bsz, prefill, 1024] - if new_k_cache.shape[1] <= 32: - # [bsz, 8, prefill, 128] --> [bsz, prefill, 1024] - k_cache = new_k_cache.transpose(1, 2).reshape( - self.batch_size, -1, self.num_key_value_heads * self.head_dim - ) - else: - # [bsz, prefill, 1024] - k_cache = new_k_cache - - if layer_idx == 0: - # init U, SV - self.U = torch.zeros( - self.num_layers, - self.batch_size, - k_cache.shape[1], - self.rank, - device=self.device, - dtype=self.dtype, - ) - self.SV = torch.zeros( - self.num_layers, - self.batch_size, - self.num_key_value_heads, - self.rank, - self.head_dim, - device=self.device, - dtype=self.dtype, - ) - - u, s, v = torch.svd(k_cache.float()) - v = v.transpose(1, 2) - # [bsz, 128k, 1024] --> [bsz, 128k, 160] [bsz, 160, 1024] (bsz, 8, 160, 128) - self.U[layer_idx].copy_(u[:, :, : self.rank].to(self.dtype)) # [bsz, 128k, 160] - self.SV[layer_idx].copy_( - torch.matmul(torch.diag_embed(s[:, : self.rank]), v[:, : self.rank]) - .to(self.dtype) - .view(self.batch_size, -1, self.num_key_value_heads, self.head_dim) - .transpose(1, 2) - ) # [bsz, 8, 160, 128] - - def register_k_landmark(self, k_landmark, k_landmark_idx, layer_idx): - num_landmarks = k_landmark.shape[-2] - if layer_idx == 0: - # init k_landmark, k_landmark_idx - self.k_landmark = torch.zeros( - self.num_layers, - self.batch_size, - self.num_key_value_heads, - num_landmarks, - self.head_dim, - device=self.device, - dtype=self.dtype, - ) - self.k_landmark_idx = torch.zeros( - self.num_layers, - self.batch_size, - self.num_key_value_heads, - num_landmarks, - device=self.device, - dtype=torch.long, - ) - - self.k_landmark[layer_idx].copy_(k_landmark.contiguous()) - self.k_landmark_idx[layer_idx].copy_(k_landmark_idx.contiguous()) - - def prefill_kv_cache( - self, - new_v_cache, - layer_idx, - key_states_roped, - ): - - incoming = new_v_cache.shape[-2] # [bsz, num_kv_heads, incoming, head_dim] - self.prefill = incoming - self.v_cache_cpu[layer_idx][:, :, :incoming] = new_v_cache.clone() - - # [x0, x1, ...., self.chunks*chunk_size, local_chunk, rest] - self.chunks = incoming // self.chunk_size - self.local_chunk - self.select_sets = self.sparse_budget // self.chunk_size - - assert ( - self.select_sets * self.chunk_size == self.sparse_budget - ), f'({self.select_sets}) * {self.chunk_size} != {self.sparse_budget}' - - # store Post-RoPE k cache to the cache - self.prefill_local = ( - incoming - self.chunks * self.chunk_size - ) # local chunks + align to chunk_size - self.k_cache_buffer[layer_idx][:, :, : self.prefill_local].copy_( - key_states_roped[:, :, -self.prefill_local:] - ) - self.v_cache_buffer[layer_idx][:, :, : self.prefill_local].copy_( - new_v_cache[:, :, -self.prefill_local:] - ) - - key_states_roped_ctx = key_states_roped[ - :, :, : self.chunks * self.chunk_size - ].view( - self.batch_size, - self.num_key_value_heads, - self.chunks, - self.chunk_size, - self.head_dim, - ) - landmark_candidates = key_states_roped_ctx.mean( - dim=-2 - ) # [bsz, kv_heads, chunks, head_dim] - - # compute the cos similarity between it and the original key cache - cos_sim = torch.nn.functional.cosine_similarity( - landmark_candidates.unsqueeze(3).expand(-1, -1, -1, self.chunk_size, -1), - key_states_roped_ctx, - dim=-1, - ) # [bsz, kv_heads, chunks, chunk_size] - - # get the outlier_chunk idx for each 
head # [bsz, kv_heads, outlier_chunk] - outlier_chunk_idx = ( - cos_sim.min(dim=-1).values.topk(self.outlier_chunk, largest=False).indices - ) - - outlier_chunk_k_cache = key_states_roped_ctx.gather( - dim=2, - index=outlier_chunk_idx.unsqueeze(-1) - .unsqueeze(-1) - .expand(-1, -1, -1, self.chunk_size, self.head_dim), - ).view( - self.batch_size, - self.num_key_value_heads, - self.outlier_chunk * self.chunk_size, - self.head_dim, - ) - - outlier_chunk_v_cache = ( - new_v_cache[:, :, : self.chunks * self.chunk_size] - .view( - self.batch_size, - self.num_key_value_heads, - self.chunks, - self.chunk_size, - self.head_dim, - ) - .gather( - dim=2, - index=outlier_chunk_idx.unsqueeze(-1) - .unsqueeze(-1) - .expand(-1, -1, -1, self.chunk_size, self.head_dim), - ) - .view( - self.batch_size, - self.num_key_value_heads, - self.outlier_chunk * self.chunk_size, - self.head_dim, - ) - ) - - self.sparse_start = self.prefill_local + self.outlier_chunk * self.chunk_size - self.sparse_end = ( - self.prefill_local - + self.outlier_chunk * self.chunk_size - + self.sparse_budget - ) - - # store outlier_chunk to the cache - self.k_cache_buffer[layer_idx][ - :, :, self.prefill_local: self.sparse_start - ].copy_(outlier_chunk_k_cache) - self.v_cache_buffer[layer_idx][ - :, :, self.prefill_local: self.sparse_start - ].copy_(outlier_chunk_v_cache) - - # filter landmark_candidates using outlier_chunk and register the rest to k_landmark - # [bsz, kv_heads, chunks, head_dim] --> [bsz, kv_heads, chunks - outlier_chunk, head_dim] - # get rest_idx: [bsz, kv_heads, chunks] --filter--> [bsz, kv_heads, chunks - outlier_chunk] - all_idx = ( - torch.arange(self.chunks, device=key_states_roped.device) - .unsqueeze(0) - .unsqueeze(0) - .expand(self.batch_size, self.num_key_value_heads, -1) - ) # [bsz, kv_heads, chunks] - mask = torch.ones_like(all_idx, dtype=torch.bool) - mask.scatter_(dim=-1, index=outlier_chunk_idx, value=False) - rest_idx = all_idx.masked_select(mask).view( - self.batch_size, self.num_key_value_heads, -1 - ) - - # register rest_idxed landmarks to k_landmark - self.register_k_landmark( - landmark_candidates.gather( - dim=2, index=rest_idx.unsqueeze(-1).expand(-1, -1, -1, self.head_dim) - ).view(self.batch_size, self.num_key_value_heads, -1, self.head_dim), - rest_idx, - layer_idx, - ) - - if layer_idx == self.num_layers - 1: - assert self.sparse_budget < incoming - self.kv_offset += incoming - - def get_retrieval_position_ids(self, layer_idx, query_states): - # self.k_landmark[layer_idx][:, :, :self.chunks] is [bsz, 8, chunks, head_dim] - # chunk_attn: [bsz, 32, window_size, chunks] - self.incoming_q_len = query_states.shape[-2] # 1 - # [bsz, 8, 4, q_len, 128] * [bsz, 8, 128, chunks] --> [bsz, 8, 4, q_len, chunks] - chunk_attn = torch.einsum( - 'bhgqd,bhdc->bhgqc', - query_states.view( - -1, - self.num_key_value_heads, - self.num_key_value_groups, - self.incoming_q_len, - self.head_dim, - ), - self.k_landmark[layer_idx].transpose(2, 3), - ).squeeze(2) / math.sqrt(128) - chunk_attn = nn.functional.softmax(chunk_attn, dim=-1, dtype=torch.float32).to( - self.dtype - ) # [bsz, 8, 4, q_len, chunks] - chunk_attn = chunk_attn.sum(dim=-2) # [bsz, 8, 4, chunks] - if self.num_key_value_groups > 1: - chunk_attn, _ = torch.max(chunk_attn, dim=-2) # [bsz, 8, chunks] - - merged_results = torch.topk( - chunk_attn, k=self.select_sets, dim=-1 - ).indices # [bsz, 8, select_sets(256)] - - # use merged_results to gather the position_ids: - # [bsz, 8, select_sets] --> [bsz, 8, select_sets] - selected_chunks = 
self.k_landmark_idx[layer_idx].gather( - dim=-1, index=merged_results - ) # [bsz, 8, select_sets] - - # this is chunk idx, which can be used to offload value cache and decide if the cache hits - self.selected_chunk_idx[layer_idx].copy_(selected_chunks, non_blocking=True) - - position_ids = ( - selected_chunks.unsqueeze(-1) * self.chunk_size - + torch.arange(self.chunk_size, device=chunk_attn.device) - .unsqueeze(0) - .unsqueeze(0) - .unsqueeze(0) - ).view( - self.batch_size, self.num_key_value_heads, -1 - ) # [bsz, 8, select_sets * chunk_size] - - return position_ids - - def get_value_cache(self, layer_idx, retrieval_position_ids): - # gather value cache - value_ = self.v_cache_cpu[layer_idx].gather( - dim=-2, - index=retrieval_position_ids.unsqueeze(-1).expand( - -1, -1, -1, self.head_dim - ), - ) - self.v_cache_buffer[layer_idx][:, :, self.sparse_start: self.sparse_end].copy_( - value_, non_blocking=True - ) - gen_offset = ( - self.gen_offset - if layer_idx == self.num_layers - 1 - else self.gen_offset + self.incoming_q_len - ) - - return self.v_cache_buffer[layer_idx][:, :, : self.sparse_end + gen_offset] - - def get_key_cache(self, layer_idx, retrieval_position_ids, cos_sin_cache): - # gather key cache and rope them - u = self.U[layer_idx] # [bsz, 128k, rank] - sv = self.SV[layer_idx] # [bsz, 8, rank, 128] - - # indexing, [bsz, 8, sparse_budget, rank] - index_expanded = retrieval_position_ids.unsqueeze(-1).expand( - -1, -1, -1, u.size(-1) - ) # [bsz, 8, sparse_budget, rank] - u_expand = u.unsqueeze(1).expand( - -1, self.num_key_value_heads, -1, -1 - ) # [bsz, 8, 128k, rank] - U_head = torch.gather(u_expand, 2, index_expanded) - - # [bsz, 8, sparse_budget, rank] -matmul- [8, rank, 128] --> [bsz, 8, sparse_budget, 128] - result = torch.einsum('bhrk,bhkd->bhrd', U_head, sv) - - # # rope the key cache - cos, sin = cos_sin_cache - result = apply_rotary_pos_emb_single(result, cos, sin, retrieval_position_ids) - - # send to buffer - self.k_cache_buffer[layer_idx][:, :, self.sparse_start: self.sparse_end].copy_( - result, non_blocking=True - ) - gen_offset = ( - self.gen_offset - if layer_idx == self.num_layers - 1 - else self.gen_offset + self.incoming_q_len - ) - - return self.k_cache_buffer[layer_idx][:, :, : self.sparse_end + gen_offset] - - def update_kv_cache( - self, - new_k_cache: torch.Tensor, - new_v_cache: torch.Tensor, - layer_idx: int, - ): - - incoming = new_k_cache.shape[-2] - self.v_cache_buffer[layer_idx][ - :, - :, - self.sparse_end - + self.gen_offset: self.sparse_end - + self.gen_offset - + incoming, - ].copy_(new_v_cache, non_blocking=True) - self.k_cache_buffer[layer_idx][ - :, - :, - self.sparse_end - + self.gen_offset: self.sparse_end - + self.gen_offset - + incoming, - ].copy_(new_k_cache, non_blocking=True) - - if layer_idx == self.num_layers - 1: - self.kv_offset += incoming - self.gen_offset += incoming - - -@KV_REGISTRY.register('SinkKV') -class SinkKVCache(DynamicCache): - def __init__( - self, - num_hidden_layers, - window_length, - num_sink_tokens, - ): - super().__init__() - self.window_length = window_length - self.num_sink_tokens = num_sink_tokens - self.cos_sin_rerotation_cache = {} - self._cos_cache = None - self._sin_cache = None - - @staticmethod - def _rotate_half(x): - x1 = x[..., :x.shape[-1] // 2] - x2 = x[..., x.shape[-1] // 2:] - return torch.cat((-x2, x1), dim=-1) - - def _apply_key_rotary_pos_emb( - self, key_states, cos, sin - ): - rotated_key_states = (key_states * cos) + (self._rotate_half(key_states) * sin) - return rotated_key_states - - def 
_get_rerotation_cos_sin( - self, key_states, cos, sin - ): - if key_states.shape[-2] not in self.cos_sin_rerotation_cache: - # Upcast to float32 temporarily for better accuracy - cos = cos.to(torch.float32) - sin = sin.to(torch.float32) - - original_cos = cos[self.num_sink_tokens + key_states.shape[-2]:] - shifted_cos = cos[self.num_sink_tokens:-key_states.shape[-2]] - original_sin = sin[self.num_sink_tokens + key_states.shape[-2]:] - shifted_sin = sin[self.num_sink_tokens:-key_states.shape[-2]] - rerotation_cos = original_cos * shifted_cos + original_sin * shifted_sin - rerotation_sin = -original_sin * shifted_cos + original_cos * shifted_sin - - self.cos_sin_rerotation_cache[key_states.shape[-2]] = ( - rerotation_cos.to(key_states.dtype).unsqueeze(0), - rerotation_sin.to(key_states.dtype).unsqueeze(0), - ) - return self.cos_sin_rerotation_cache[key_states.shape[-2]] - - def get_seq_length(self, layer_idx=0): - """Returns the sequence length of the cached states. - - A layer index can be optionally passed. - """ - if len(self.key_cache) <= layer_idx: - return 0 - return self.key_cache[layer_idx].shape[-2] - - def get_max_cache_shape(self): - """Returns the maximum sequence length of the cache object, in case of - SinkCache it is the window length.""" - return self.window_length - - def update( - self, - key_states, - value_states, - layer_idx, - cache_kwargs, - ): - - sin = cache_kwargs.get('sin') - cos = cache_kwargs.get('cos') - partial_rotation_size = cache_kwargs.get('partial_rotation_size') - using_rope = cos is not None and sin is not None - - if layer_idx == 0: - self._seen_tokens += key_states.shape[-2] - - if using_rope and layer_idx == 0: - - if cos.dim() == 2: - self._cos_cache = cos - self._sin_cache = sin - else: - if self._cos_cache is None: - self._cos_cache = cos[0, ...] - self._sin_cache = sin[0, ...] 
- elif self._cos_cache.shape[0] < self.window_length: - self._cos_cache = torch.cat([self._cos_cache, cos[0, ...]], dim=0) - self._sin_cache = torch.cat([self._sin_cache, sin[0, ...]], dim=0) - - # [bsz, num_heads, seq_len, head_dim] - if len(self.key_cache) <= layer_idx: - # Empty cache - self.key_cache.append(key_states) - self.value_cache.append(value_states) - - elif key_states.shape[-2] + self.get_seq_length(layer_idx) < self.window_length: - # Growing cache - self.key_cache[layer_idx] = \ - torch.cat([self.key_cache[layer_idx], key_states], dim=-2) - self.value_cache[layer_idx] = \ - torch.cat([self.value_cache[layer_idx], value_states], dim=-2) - - else: - # Shifting cache - keys_to_keep = self.key_cache[layer_idx][ - :, :, -self.window_length + self.num_sink_tokens + key_states.shape[-2]: - ] - - if using_rope: - rerotation_cos, rerotation_sin = self._get_rerotation_cos_sin( - key_states, - self._cos_cache[: self.window_length], - self._sin_cache[: self.window_length] - ) - if partial_rotation_size is not None: - keys_to_keep, keys_pass = ( - keys_to_keep[..., :partial_rotation_size], - keys_to_keep[..., partial_rotation_size:], - ) - keys_to_keep = self._apply_key_rotary_pos_emb(keys_to_keep, - rerotation_cos, - rerotation_sin) - if partial_rotation_size is not None: - keys_to_keep = torch.cat((keys_to_keep, keys_pass), dim=-1) - - # Concatenate sink tokens, shifted & rotated tokens (if needed), and new tokens - sink_keys = self.key_cache[layer_idx][:, :, : self.num_sink_tokens] - - self.key_cache[layer_idx] = torch.cat([sink_keys, keys_to_keep, key_states], dim=-2) - - sink_values = self.value_cache[layer_idx][:, :, : self.num_sink_tokens] - values_to_keep = self.value_cache[layer_idx][ - :, :, -self.window_length + self.num_sink_tokens + value_states.shape[-2]: - ] - - self.value_cache[layer_idx] = torch.cat([sink_values, - values_to_keep, - value_states], dim=-2) - - return self.key_cache[layer_idx], self.value_cache[layer_idx] - - def _reset_states(self): - self.key_cache = [] - self.value_cache = [] - self._seen_tokens = 0 diff --git a/llmc/compression/sparsification/magnitude.py b/llmc/compression/sparsification/magnitude.py deleted file mode 100644 index 8396b46a6..000000000 --- a/llmc/compression/sparsification/magnitude.py +++ /dev/null @@ -1,31 +0,0 @@ -import torch -from loguru import logger - -from llmc.utils.registry_factory import ALGO_REGISTRY - -from .base_blockwise_sparsification import BaseBlockwiseSparsification - - -@ALGO_REGISTRY -class Magnitude(BaseBlockwiseSparsification): - def __init__(self, model, sparsity_config, input, padding_mask, config): - super().__init__(model, sparsity_config, input, padding_mask, config) - - @torch.no_grad() - def subset_transform( - self, - subset, - input_feat, - subset_kwargs, - ): - layers_dict = subset['layers'] - - layers = list(layers_dict.values()) - for layer in layers: - W = layer.weight.data - W_metric = torch.abs(W) - thresh = torch.sort(W_metric.flatten().cuda())[0][ - int(W.numel() * self.sparser.sparsity) - ].cpu() - W_mask = W_metric <= thresh - W[W_mask] = 0 diff --git a/llmc/compression/sparsification/shortgpt.py b/llmc/compression/sparsification/shortgpt.py deleted file mode 100644 index 9684e19d4..000000000 --- a/llmc/compression/sparsification/shortgpt.py +++ /dev/null @@ -1,113 +0,0 @@ -import gc -import json -from typing import List, Optional - -import numpy as np -import torch -import torch.nn as nn -from loguru import logger -from transformers.models.llama.modeling_llama import LlamaRMSNorm -from 
transformers.models.mistral.modeling_mistral import MistralRMSNorm - -from llmc.utils import copy_files -from llmc.utils.registry_factory import ALGO_REGISTRY - -from .base_blockwise_sparsification import BaseBlockwiseSparsification - - -@ALGO_REGISTRY -class ShortGPT(BaseBlockwiseSparsification): - def __init__(self, model, sparsity_config, input, padding_mask, config): - super().__init__(model, sparsity_config, input, padding_mask, config) - self.importances = np.zeros(len(self.blocks)) - - def block_opt(self, block): - block = block.cuda() - - output_feat = self.block_forward(block) - torch.cuda.empty_cache() - self.block_transform(self.input['data'], output_feat) - self.input['data'] = output_feat - - def block_transform(self, input_feat, output_feat): - logger.info(f'Start transform the {self.block_idx+1}-th block') - self.subset_transform( - input_feat, - output_feat - ) - - @torch.no_grad() - def compute_bi( - self, - input_feat: torch.Tensor, - output_feat: torch.Tensor - ): - _, _, d = input_feat.shape - input_feat = input_feat.reshape(-1, d) - output_feat = output_feat.reshape(-1, d) - - norm_input = input_feat.norm(dim=-1, keepdim=True) - norm_output = output_feat.norm(dim=-1, keepdim=True) - - sim = (input_feat @ output_feat.T) / (norm_input * norm_output) - sim = sim.diagonal().nan_to_num(nan=0.5) - - return 1 - sim - - @torch.no_grad() - def subset_transform( - self, - input_feat, - output_feat - ): - # calculate BI score - self.importances[self.block_idx] = self.compute_bi( - input_feat[0], output_feat[0] - ).sum().cpu().item() - - @torch.no_grad() - def remove_layers( - self, - layers_to_remove: Optional[List[int]] = [] - ): - if not layers_to_remove and self.n_prune_layers: - layers_to_remove = np.argsort( - np.array(self.importances) - )[:self.n_prune_layers].tolist() - - for idx in sorted(layers_to_remove, reverse=True): - try: - del self.blocks[idx] - except IndexError: - logger.info(f'layer {idx} does not exist') - return layers_to_remove - - @torch.no_grad() - def deploy(self, deploy_format): - logger.info(f'After compute, BI scores are {self.importances}') - logger.info('-- deploy_sparsity_model start --') - logger.info(f'sparsity_config : {self.sparsity_config}') - logger.info('-- begin remove layers --') - layers_to_remove = self.remove_layers() - logger.info(f'remove layers: {layers_to_remove}') - logger.info('-- deploy_sparsity_model done --') - - @torch.no_grad() - def save_model(self, path): - if self.config.model.type == 'Llava': - self.model.llava_model.language_model = self.model.get_model() - self.model.llava_model.save_pretrained(path) - logger.info('save model done --') - self.copy_tokenizer(path) - copy_files(self.config.model.path, path, 'preprocessor_config') - else: - self.model.get_model().save_pretrained(path) - config_file = path + '/config.json' - - logger.info('save model done --') - self.copy_tokenizer(path) - with open(config_file, 'r') as file: - config_new = json.load(file) - config_new['num_hidden_layers'] = len(self.blocks) - with open(config_file, 'w') as file: - json.dump(config_new, file, indent=4) diff --git a/llmc/compression/sparsification/wanda.py b/llmc/compression/sparsification/wanda.py deleted file mode 100644 index e167ebd9b..000000000 --- a/llmc/compression/sparsification/wanda.py +++ /dev/null @@ -1,56 +0,0 @@ -import torch -import torch.nn as nn -from loguru import logger - -from llmc.utils.registry_factory import ALGO_REGISTRY - -from .base_blockwise_sparsification import BaseBlockwiseSparsification - - -@ALGO_REGISTRY 
-class Wanda(BaseBlockwiseSparsification): - def __init__(self, model, sparsity_config, input, padding_mask, config): - super().__init__(model, sparsity_config, input, padding_mask, config) - - @torch.no_grad() - def get_row_scale(self, layer, act): - if len(act.shape) == 2: - act = act.unsqueeze(0) - nsamples = act.shape[0] - if isinstance(layer, nn.Linear): - if len(act.shape) == 3: - act = act.reshape((-1, act.shape[-1])) - act = act.t() - - columns = layer.weight.data.shape[1] - - scaler_row = torch.zeros((columns), device=layer.weight.device) - - act = act.type(torch.float32).to(scaler_row.device) - scaler_row += torch.norm(act, p=2, dim=1) ** 2 / nsamples - return scaler_row - - @torch.no_grad() - def subset_transform( - self, - subset, - input_feat, - subset_kwargs, - ): - layers_dict = subset['layers'] - input_name = subset['input'][0] - - layers = list(layers_dict.values()) - for layer in layers: - scaler_row = self.get_row_scale(layer, input_feat[input_name][0]) - W_metric = torch.abs(layer.weight.data) * torch.sqrt( - scaler_row.reshape((1, -1)) - ) - - W_mask = ( - torch.zeros_like(W_metric) == 1 - ) # initialize a mask to be all False - sort_res = torch.sort(W_metric, dim=-1, stable=True) - indices = sort_res[1][:, : int(W_metric.shape[1] * self.sparser.sparsity)] - W_mask.scatter_(1, indices, True) - layer.weight.data[W_mask] = 0 # set weights to zero diff --git a/llmc/compression/token_reduction/__init__.py b/llmc/compression/token_reduction/__init__.py deleted file mode 100755 index 85dbe9ff1..000000000 --- a/llmc/compression/token_reduction/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -from .base_blockwise_token_reduction import TokenReduction -from .dart import DART -from .divprune import DivPrune -from .dycoke import DyCoke -from .fastervlm import FasterVLM -from .fastv import FastV -from .fastvid import FastVID -from .holitom import HoliTom -from .mustdrop import MustDrop -from .prunevid import PruneVid -from .pyramiddrop import PyramidDrop -from .random import RandomPrune -from .sparsevlm import SparseVLM -from .tome import ToMe -from .visionzip import VisionZip -from .vispruner import VisPruner -from .visualizer import Visualizer diff --git a/llmc/compression/token_reduction/base_blockwise_token_reduction.py b/llmc/compression/token_reduction/base_blockwise_token_reduction.py deleted file mode 100644 index 3574dfa2e..000000000 --- a/llmc/compression/token_reduction/base_blockwise_token_reduction.py +++ /dev/null @@ -1,31 +0,0 @@ -import functools -import gc -from collections import defaultdict - -import torch -from loguru import logger - -from llmc.utils.registry_factory import ALGO_REGISTRY, TOKEN_REDUCTION_REGISTRY - -from ..blockwise_optimization import BlockwiseOpt - - -@ALGO_REGISTRY -class TokenReduction(BlockwiseOpt): - def __init__(self, model, sparsity_config, input, padding_mask, config): - super().__init__(model, sparsity_config, input, padding_mask, config) - self.register_reduction_modules() - - def register_reduction_modules(self): - TOKEN_REDUCTION_REGISTRY[self.sparsity_config['special']['method']]( - self.sparsity_config, self.model, self.blocks - ) - - def block_opt(self, block): - pass - - @torch.no_grad() - def deploy(self, deploy_format): - logger.info('-- deploy_token_reduction_model start --') - logger.info(f'sparsity_config : {self.sparsity_config}') - logger.info('-- deploy_token_reduction_model done --') diff --git a/llmc/compression/token_reduction/dart.py b/llmc/compression/token_reduction/dart.py deleted file mode 100644 index 
79742bc3e..000000000 --- a/llmc/compression/token_reduction/dart.py +++ /dev/null @@ -1,179 +0,0 @@ -import functools -from types import MethodType - -import torch - -from llmc.utils.registry_factory import TOKEN_REDUCTION_REGISTRY - -from .token_reduction_module import TokenReductionModule -from .utils import prefill_wrapper - - -@TOKEN_REDUCTION_REGISTRY.register('DART') -class DART(TokenReductionModule): - def __init__(self, config, model, blocks): - super().__init__(config, model, blocks) - self.add_sparse_config() - self.register_reduction_modules() - - def add_sparse_config(self): - self.pruning_loc = self.special_config['pruning_loc'] - - self.pruning_paras = self.special_config - - def register_reduction_modules(self): - - @prefill_wrapper - def vtoken_length_hook(module, args, pruning_paras): - input_ids = args[0] - token_indices = torch.where( - input_ids[0] == pruning_paras['vision_token_index'] - )[0] - pruning_paras['vision_token_length'] = token_indices.shape[0] - - @prefill_wrapper - def get_any_states_hook(module, args, kwargs, layer_outs, pruning_paras, layer_idx): - past_key_value = kwargs['past_key_value'] - if past_key_value is None: - raise ValueError('DART needs past_key_value but got None.') - pruning_paras['any_states'] = past_key_value.key_cache[layer_idx] - - @prefill_wrapper - def pruning_hook(module, args, kwargs, pruning_paras, normlayer): - - image_token_start_index = pruning_paras['vision_token_start_index'] - image_token_length = pruning_paras['vision_token_length'] - any_states = pruning_paras['any_states'] - - hidden_states = args[0] - attention_mask = kwargs['attention_mask'] - seq_length = hidden_states.shape[1] - device = hidden_states.device - last_layer_state = normlayer(hidden_states) - - # keep index - retained_image_tokens_index = get_retained_image_token( - pruning_paras, last_layer_state, any_states) - - keep_indexs = torch.cat( - ( - torch.arange(image_token_start_index, device=device), - retained_image_tokens_index, - torch.arange( - image_token_start_index + image_token_length, - seq_length, - device=device - ) - ) - ) - # sort index - keep_indexs = keep_indexs.sort().values - hidden_states = hidden_states[:, keep_indexs, :] - position_ids = keep_indexs.unsqueeze(0) - if attention_mask is not None: - attention_mask = attention_mask[ - :, :, :hidden_states.shape[1], :hidden_states.shape[1] - ] - kwargs['attention_mask'].resize_as_(attention_mask).copy_(attention_mask.clone()) - kwargs['cache_position'].resize_as_(position_ids.squeeze(0)).copy_( - position_ids.squeeze(0).clone()) - kwargs['position_ids'].resize_as_(position_ids).copy_(position_ids.clone()) - - position_embeddings = kwargs['position_embeddings'] - index_dim = 1 if position_embeddings[0].dim() == 3 else 2 - new_pe0 = position_embeddings[0].index_select(index_dim, keep_indexs).clone() - new_pe1 = position_embeddings[1].index_select(index_dim, keep_indexs).clone() - position_embeddings[0].resize_as_(new_pe0).copy_(new_pe0) - position_embeddings[1].resize_as_(new_pe0).copy_(new_pe1) - - return (hidden_states,), kwargs - - if self.special_config['vision_token_length'] is None: - if self.model.__class__.__name__ == 'Llava': - self.model.vlm_model.prepare_inputs_labels_for_multimodal = MethodType( - self.vtoken_length_for_llava_hook( - self.model.vlm_model.prepare_inputs_labels_for_multimodal, - self.pruning_paras - ), self.model.vlm_model - ) - else: - self.model.embed_tokens.register_forward_pre_hook( - functools.partial(vtoken_length_hook, pruning_paras=self.pruning_paras) - ) - - 
self.blocks[self.pruning_loc - 1].register_forward_hook( - functools.partial( - get_any_states_hook, - pruning_paras=self.pruning_paras, - layer_idx=self.pruning_loc - 1 - ), - with_kwargs=True - ) - - self.blocks[self.pruning_loc].register_forward_pre_hook( - functools.partial( - pruning_hook, - pruning_paras=self.pruning_paras, - normlayer=self.model.language_model.norm - ), - with_kwargs=True - ) - - -def get_retained_image_token(pruning_paras, last_layer_state, any_states): - image_token_start_index = pruning_paras['vision_token_start_index'] - image_token_length = pruning_paras['vision_token_length'] - pivot_image_token = pruning_paras['pivot_image_token'] - pivot_text_token = pruning_paras['pivot_text_token'] - reduction_ratio = pruning_paras['reduction_ratio'] - TOKEN_TOPK = int( - image_token_length * (1 - reduction_ratio) / (pivot_image_token + pivot_text_token) - ) - device = last_layer_state.device - - any_states = any_states.permute(0, 2, 1, 3) - any_states = any_states.reshape(any_states.shape[0], any_states.shape[1], -1) - - k_states_image_token = any_states[0][ - image_token_start_index:image_token_start_index + image_token_length, : - ] - k_states_query_token = any_states[0][image_token_start_index + image_token_length:, :] - - k_states_image_token_L1_norm = torch.norm(k_states_image_token, p=1, dim=-1) - k_states_query_token_L1_norm = torch.norm(k_states_query_token, p=1, dim=-1) - - image_indices = ( - k_states_image_token_L1_norm.topk(pivot_image_token).indices - + image_token_start_index - ).tolist() - query_indices = ( - k_states_query_token_L1_norm.topk(pivot_text_token).indices - + image_token_start_index + image_token_length - ).tolist() - indices_set = set(image_indices + query_indices) - - valid_indices = set( - range(image_token_start_index, image_token_start_index + image_token_length) - ) - set(image_indices) - - valid_indices_list = list(valid_indices) - for item in list(indices_set): - valid_vectors = last_layer_state[0][valid_indices_list, :] - cos_sim = -torch.nn.functional.cosine_similarity( - last_layer_state[0][item, :], - valid_vectors, - dim=-1 - ) - top_k_indices = cos_sim.topk(TOKEN_TOPK).indices - - top_k_real_indices = [valid_indices_list[i] for i in top_k_indices] - indices_set.update(top_k_real_indices) - - valid_indices.difference_update(top_k_real_indices) - valid_indices_list = list(valid_indices) - - indices_set.difference_update(query_indices) - - retained_image_tokens_index = torch.tensor(list(indices_set), device=device) - - return retained_image_tokens_index diff --git a/llmc/compression/token_reduction/divprune.py b/llmc/compression/token_reduction/divprune.py deleted file mode 100644 index 9ca45e86f..000000000 --- a/llmc/compression/token_reduction/divprune.py +++ /dev/null @@ -1,125 +0,0 @@ -from functools import wraps -from types import MethodType - -import torch - -from llmc.utils.registry_factory import TOKEN_REDUCTION_REGISTRY - -from .token_reduction_module import TokenReductionModule - - -def pairwise_cosine_similarity(matrix): - norm_matrix = matrix / matrix.norm(dim=1, keepdim=True) - cosine_similarity = torch.mm(norm_matrix, norm_matrix.t()) - return cosine_similarity - - -def divprune( - visual_feature_vectors, - image_feature_length, - cosine_matrix=None, - threshold_ratio=0.1, -): - threshold_terms = round(threshold_ratio * image_feature_length) - if cosine_matrix is None: - cosine_matrix = 1.0 - (pairwise_cosine_similarity(visual_feature_vectors)) - - s = torch.empty( - threshold_terms, dtype=torch.long, 
device=visual_feature_vectors.device - ) - for i in range(threshold_terms): - if i == 0: - m2 = cosine_matrix - else: - m2 = torch.index_select( - cosine_matrix, - 0, - torch.index_select( - s, 0, torch.arange(0, i, device=cosine_matrix.device) - ), - ) - - if i == 0: - scores = torch.topk(m2, 2, dim=0, largest=False).values[ - 1, : - ] # for distance - else: - scores = torch.min(m2, dim=0).values # for distance - - phrase_to_add_idx = torch.argmax(scores) - s[i] = phrase_to_add_idx - return s, cosine_matrix - - -def divprune_post_hook(*args, pruning_paras=None): - args = list(args) - position_ids, attention_mask, inputs_embeds = args[1], args[2], args[4] - rate = pruning_paras['reduction_ratio'] - SYS_TOKEN_LEN = pruning_paras['vision_token_start_index'] - img_feature_len = pruning_paras['vision_token_length'] - device = inputs_embeds.device - visual_tokens = inputs_embeds[0][SYS_TOKEN_LEN: SYS_TOKEN_LEN + img_feature_len] - selected_visual_tokens, cosine_matrix = divprune( - visual_tokens, img_feature_len, None, threshold_ratio=1 - rate - ) - - selected_visual_tokens += SYS_TOKEN_LEN - keep_indexs = torch.cat( - ( - torch.arange(SYS_TOKEN_LEN, device=device), - selected_visual_tokens, - torch.arange( - SYS_TOKEN_LEN + img_feature_len, inputs_embeds.shape[1], device=device - ), - ) - ) - keep_indexs = keep_indexs.sort().values - - if position_ids is not None: - args[1] = position_ids[:, keep_indexs, :] - if attention_mask is not None: - args[2] = attention_mask[:, keep_indexs] - args[4] = inputs_embeds[:, keep_indexs] - - return tuple(args) - - -@TOKEN_REDUCTION_REGISTRY.register('DivPrune') -class DivPrune(TokenReductionModule): - def __init__(self, config, model, blocks): - super().__init__(config, model, blocks) - self.add_sparse_config() - self.register_reduction_modules() - - def add_sparse_config(self): - self.pruning_paras = self.special_config - - def register_reduction_modules(self): - - def input_hook_llava(fn, pruning_paras, llava_next): - @wraps(fn) - def wrapper(self, *args, **kwargs): - if args[0].shape[1] == 1: - return fn(*args, **kwargs) - outs = fn(*args, **kwargs) - - if llava_next: - message = ( - 'To obtain the vision_token_length for LLaVA-1.6, you should append ' - '`image_features[0].shape[0]` to the return value of the function ' - '`prepare_inputs_labels_for_multimodal`, and modify the related code.' 
- ) - assert len(outs) == 7, message - pruning_paras['vision_token_length'] = outs[-1] - return divprune_post_hook(*outs, pruning_paras=pruning_paras) - return wrapper - - if self.model.__class__.__name__ == 'Llava': - - self.model.vlm_model.prepare_inputs_labels_for_multimodal = MethodType( - input_hook_llava( - self.model.vlm_model.prepare_inputs_labels_for_multimodal, - self.pruning_paras, - llava_next=self.special_config['vision_token_length'] is None - ), self.model.vlm_model - ) diff --git a/llmc/compression/token_reduction/dycoke.py b/llmc/compression/token_reduction/dycoke.py deleted file mode 100644 index b46916584..000000000 --- a/llmc/compression/token_reduction/dycoke.py +++ /dev/null @@ -1,122 +0,0 @@ -import functools -from typing import List, Optional, Tuple, Union - -import torch -import torch.nn.functional as F -from loguru import logger - -try: - from llava.model.llava_arch import LlavaMetaForCausalLM -except ImportError: - pass -from transformers.cache_utils import Cache, DynamicCache - -from llmc.utils.registry_factory import TOKEN_REDUCTION_REGISTRY - -from .token_reduction_module import TokenReductionModule -from .utils import add_post_hook_to_get_2dPool - - -def dycole_ttm(image_feature, pruning_paras): - bs, num_tokens_per_frame, _ = image_feature.shape - image_feature = image_feature.flatten(0, 1) - # Split frames into tokens - num_frames = image_feature.shape[0] // num_tokens_per_frame - merging_ratio = 1 - pruning_paras['merging_ratio'] - # Calculate similarities between adjacent even frames - similarities = [] - for i in range(0, num_frames - 1, 2): - # Get tokens for adjacent frames - frame1_tokens = image_feature[ - i * num_tokens_per_frame: (i + 1) * num_tokens_per_frame - ] - frame2_tokens = image_feature[ - (i + 1) * num_tokens_per_frame: (i + 2) * num_tokens_per_frame - ] - - # Calculate cosine similarity between normalized tokens - frame1_norm = torch.nn.functional.normalize(frame1_tokens, p=2, dim=1) - frame2_norm = torch.nn.functional.normalize(frame2_tokens, p=2, dim=1) - similarity = torch.nn.functional.cosine_similarity( - frame1_norm, frame2_norm, dim=1 - ) - similarities.append(similarity) - - similarities = torch.stack( - [torch.tensor(similarity) for similarity in similarities] - ) - - # Process even frames - modified_image_feature = [] - for i in range(0, num_frames - 1, 2): - frame1_tokens = image_feature[ - i * num_tokens_per_frame: (i + 1) * num_tokens_per_frame - ] - frame2_tokens = image_feature[ - (i + 1) * num_tokens_per_frame: (i + 2) * num_tokens_per_frame - ] - - avg_similarity = similarities[i // 2] - num_tokens_to_keep = int(merging_ratio * num_tokens_per_frame) - tokens_to_keep = avg_similarity.topk(num_tokens_to_keep, largest=False).indices - - modified_image_feature.append(frame1_tokens) - modified_image_feature.append(frame2_tokens[tokens_to_keep]) - - # Process odd frames - odd_similarities = [] - for i in range(0, num_frames - 4, 4): - frame1_tokens = image_feature[ - i * num_tokens_per_frame: (i + 1) * num_tokens_per_frame - ] - frame2_tokens = image_feature[ - (i + 2) * num_tokens_per_frame: (i + 3) * num_tokens_per_frame - ] - - similarity = torch.nn.functional.cosine_similarity( - frame1_tokens, frame2_tokens, dim=1 - ) - odd_similarities.append(similarity) - - odd_similarities = torch.stack( - [torch.tensor(similarity) for similarity in odd_similarities] - ) - - for i in range(0, num_frames - 4, 4): - frame1_tokens = image_feature[ - i * num_tokens_per_frame: (i + 1) * num_tokens_per_frame - ] - frame2_tokens = 
image_feature[ - (i + 2) * num_tokens_per_frame: (i + 3) * num_tokens_per_frame - ] - - avg_similarity = odd_similarities[i // 4] - num_tokens_to_keep = int(merging_ratio * num_tokens_per_frame) - tokens_to_keep = avg_similarity.topk(num_tokens_to_keep, largest=False).indices - - modified_image_feature[i] = frame1_tokens - modified_image_feature[i + 2] = frame2_tokens[tokens_to_keep] - - # Combine all tokens - combined_tokens = torch.cat(modified_image_feature, dim=0).unsqueeze(0) - return combined_tokens - - -@TOKEN_REDUCTION_REGISTRY.register('DyCoke') -class DyCoke(TokenReductionModule): - def __init__(self, config, model, blocks): - super().__init__(config, model, blocks) - self.add_sparse_config() - self.register_reduction_modules() - - def add_sparse_config(self): - self.special_config['different_token_idxs'] = [] - self.dycoke_layer_idx = self.special_config['dycoke_layer_idx'] - self.model.model.pruning_paras = self.special_config - - def register_reduction_modules(self): - - if isinstance(self.model.model, LlavaMetaForCausalLM): - add_post_hook_to_get_2dPool( - self.model.model, dycole_ttm, self.model.model.pruning_paras - ) diff --git a/llmc/compression/token_reduction/fastervlm.py b/llmc/compression/token_reduction/fastervlm.py deleted file mode 100644 index 691815be7..000000000 --- a/llmc/compression/token_reduction/fastervlm.py +++ /dev/null @@ -1,178 +0,0 @@ -import functools - -import torch - -from llmc.compression.sparsification.attn_utils import _update_causal_mask -from llmc.utils.registry_factory import TOKEN_REDUCTION_REGISTRY - -from .token_reduction_module import TokenReductionModule - - -@TOKEN_REDUCTION_REGISTRY.register('FasterVLM') -class FasterVLM(TokenReductionModule): - def __init__(self, config, model, blocks): - super().__init__(config, model, blocks) - self.add_sparse_config() - self.register_reduction_modules() - - def add_sparse_config(self): - special_config = self.config.get('special', {}) - self.visual_token_num = round( - self.model.pruning_config['image_token_length'] * (1 - special_config['rate']) - ) - special_config['select_layer'] = self.model.pruning_config['select_layer'] - special_config['select_feature'] = self.model.pruning_config['select_feature'] - special_config['image_token_index'] = self.model.pruning_config['image_token_index'] - - special_config['image_attentions_list'] = [] - - self.pruning_paras = special_config - - def register_reduction_modules(self): - - def update_output_attentions_hook(module, args, kwargs): - kwargs['output_attentions'] = True - return args, kwargs - - def clear_attentions_hook(m, x, pruning_paras): - pruning_paras['image_attentions_list'].clear() - - def store_attention_hook(m, x, image_forward_outs, pruning_paras): - image_attentions = image_forward_outs.attentions[pruning_paras['select_layer']] - if pruning_paras['select_feature'] in ('default', 'patch'): - image_attention = image_attentions[:, :, 0, 1:] - elif pruning_paras['select_feature'] in ('full', 'cls_patch'): - image_attention = image_attentions - else: - raise ValueError(f'Unexpected select feature: {self.select_feature}') - pruning_paras['image_attentions_list'].append(image_attention.to(x[0].dtype)) - - def update_attentions_hook(m, x, outs, pruning_paras): - if len(pruning_paras['image_attentions_list']) == 1: - pruning_paras['image_attentions'] = pruning_paras['image_attentions_list'][0] - else: - pruning_paras['image_attentions'] = pruning_paras['image_attentions_list'] - - def pruning_hook(module, args, kwargs, pruning_paras): - - # for 
llavahf bs 1 - if 'image_attentions' not in pruning_paras: - pruning_paras['image_attentions'] = pruning_paras['image_attentions_list'][0] - - image_features = args[0] - image_attentions = pruning_paras['image_attentions'] - - B, N, C = image_features.shape - visual_token_num = self.visual_token_num # T - - # prune visual tokens by attention scores - image_attentions = image_attentions.mean(dim=1) # (B, N) - token_indices = torch.topk(image_attentions, k=visual_token_num, dim=1)[1] # (B, T) - - # generate index mask - index_masks = torch.zeros( - B, N, - dtype=torch.bool, - device=image_features.device - ) # (B, N) - index_masks.scatter_(1, token_indices, True) # (B, N) - - pruning_paras['index_masks'] = index_masks - - return (image_features,), kwargs - - def get_image_mask_hook(module, args, kwargs, pruning_paras): - pruning_paras['image_masks'] = ( - kwargs['input_ids'] == pruning_paras['image_token_index'] - ) # (B, len) - - def prepare_inputs_for_llm_hook(module, args, kwargs, pruning_paras): - - # Only batch size 1 is currently supported. - inputs_embeds = kwargs['inputs_embeds'] - image_mask = pruning_paras['image_masks'][0] - index_mask = pruning_paras['index_masks'][0] - - B, L = inputs_embeds.shape[:2] - device = inputs_embeds.device - - visual_indexs = torch.arange(L, device=device)[image_mask] - keep_visual_indexs = visual_indexs[index_mask] - - non_visual_indexs = torch.arange(L, device=device)[~image_mask] - - keep_indexs = torch.cat([non_visual_indexs, keep_visual_indexs]).sort().values - - new_inputs_embeds = kwargs['inputs_embeds'][:, keep_indexs, :] - new_attention_mask = kwargs['attention_mask'][:, keep_indexs] - new_position_ids = kwargs['position_ids'][:, keep_indexs] - new_cache_position = kwargs['cache_position'][keep_indexs] - - kwargs['inputs_embeds'] = new_inputs_embeds - kwargs['attention_mask'] = new_attention_mask - kwargs['position_ids'] = new_position_ids - kwargs['cache_position'] = new_cache_position - - return args, kwargs - - def prepare_inputs_hook(module, inputs, outputs, pruning_paras): - - image_features = outputs - index_masks = pruning_paras['index_masks'] - # image_attentions = pruning_paras['image_attentions'] - new_image_features = [] - for image_feature, index_mask in zip(image_features, index_masks): - image_feature = image_feature[index_mask] - new_image_features.append(image_feature) - image_features = torch.stack(new_image_features, dim=0) - - outputs = image_features - pruning_paras['image_features_shape'] = image_features[0].shape[0] - - return outputs - - if self.model.__class__.__name__ == 'LlavaHf': - self.model.vision_model.register_forward_pre_hook( - update_output_attentions_hook, - with_kwargs=True - ) - - self.model.vision_model.register_forward_hook( - functools.partial(store_attention_hook, pruning_paras=self.pruning_paras), - ) - elif self.model.__class__.__name__ == 'Llava': - self.model.vision_model.register_forward_pre_hook( - functools.partial(clear_attentions_hook, pruning_paras=self.pruning_paras), - ) - - self.model.vision_model.register_forward_hook( - functools.partial(update_attentions_hook, pruning_paras=self.pruning_paras), - ) - - self.model.vision_model.vision_tower.register_forward_pre_hook( - update_output_attentions_hook, - with_kwargs=True - ) - - self.model.vision_model.vision_tower.register_forward_hook( - functools.partial(store_attention_hook, pruning_paras=self.pruning_paras), - ) - - self.model.vision_projector.register_forward_pre_hook( - functools.partial(pruning_hook, pruning_paras=self.pruning_paras), 
- with_kwargs=True - ) - - if self.model.__class__.__name__ == 'LlavaHf': - self.model.vlm_model.register_forward_pre_hook( - functools.partial(get_image_mask_hook, pruning_paras=self.pruning_paras), - with_kwargs=True - ) - self.model.model.model.register_forward_pre_hook( - functools.partial(prepare_inputs_for_llm_hook, pruning_paras=self.pruning_paras), - with_kwargs=True - ) - elif self.model.__class__.__name__ == 'Llava': - self.model.vision_projector.register_forward_hook( - functools.partial(prepare_inputs_hook, pruning_paras=self.pruning_paras), - ) diff --git a/llmc/compression/token_reduction/fastv.py b/llmc/compression/token_reduction/fastv.py deleted file mode 100644 index 2a2e8e478..000000000 --- a/llmc/compression/token_reduction/fastv.py +++ /dev/null @@ -1,128 +0,0 @@ -import functools -from types import MethodType - -import torch - -from llmc.utils.registry_factory import TOKEN_REDUCTION_REGISTRY - -from .token_reduction_module import TokenReductionModule -from .utils import prefill_wrapper - - -@TOKEN_REDUCTION_REGISTRY.register('FastV') -class FastV(TokenReductionModule): - def __init__(self, config, model, blocks): - super().__init__(config, model, blocks) - self.add_sparse_config() - self.register_reduction_modules() - - def add_sparse_config(self): - self.pruning_loc = self.special_config['pruning_loc'] - - self.pruning_paras = self.special_config - - def register_reduction_modules(self): - - @prefill_wrapper - def vtoken_length_hook(module, input_args, pruning_paras): - input_ids = input_args[0] - token_indices = torch.where( - input_ids[0] == pruning_paras['vision_token_index'] - )[0] - pruning_paras['vision_token_length'] = token_indices.shape[0] - return input_args - - @prefill_wrapper - def update_output_attentions_hook(module, args, kwargs, pruning_paras): - kwargs['output_attentions'] = True - pruning_paras['attn_scores'] = module.__class__.forward(module, *args, **kwargs)[1] - kwargs['output_attentions'] = False - return args, kwargs - - @prefill_wrapper - def fastv_pruning_hook(module, args, kwargs, pruning_paras): - - rate = pruning_paras['rate'] - image_token_start_index = pruning_paras['vision_token_start_index'] - image_token_length = pruning_paras['vision_token_length'] - - hidden_states = args[0] - causal_mask = kwargs['attention_mask'] - - device = hidden_states.device - # last_layer_attention = layer_outputs[1] - last_layer_attention = pruning_paras['attn_scores'] - # compute average attention over different head - last_layer_attention_avg = torch.mean(last_layer_attention, dim=1)[0] - # generate new attention mask based on the average attention, - # sample the top ATTENTION_RANK tokens with highest attention - last_layer_attention_avg_last_tok = last_layer_attention_avg[-1] - # get the attention in image token - last_layer_attention_avg_last_tok_image = \ - last_layer_attention_avg_last_tok[image_token_start_index: - image_token_start_index + image_token_length] - # get the indexes of the top ATTENTION_RANK tokens - top_attention_rank_index = \ - last_layer_attention_avg_last_tok_image.topk( - round(image_token_length * (1 - rate))).indices + image_token_start_index - - if self.model.first_turn_question: - module.register_buffer('top_attention_rank_index', top_attention_rank_index) - else: - top_attention_rank_index = module.top_attention_rank_index - - # keep index - keep_indexs = torch.cat( - ( - torch.arange(image_token_start_index, device=device), - top_attention_rank_index, - torch.arange(image_token_start_index + image_token_length, - 
hidden_states.shape[1], device=device) - ) - ) - - # sort index - keep_indexs = keep_indexs.sort().values - # filter hidden states & - hidden_states = hidden_states[:, keep_indexs, :] - # update position ids - position_ids = keep_indexs.unsqueeze(0) - # update attention mask - if causal_mask is not None: - causal_mask = causal_mask[:, :, :hidden_states.shape[1], :hidden_states.shape[1]] - kwargs['attention_mask'].resize_as_(causal_mask).copy_(causal_mask.clone()) - kwargs['cache_position'].resize_as_(position_ids.squeeze(0)).copy_( - position_ids.squeeze(0).clone()) - kwargs['position_ids'].resize_as_(position_ids).copy_(position_ids.clone()) - - position_embeddings = kwargs['position_embeddings'] - index_dim = 1 if position_embeddings[0].dim() == 3 else 2 - new_pe0 = position_embeddings[0].index_select(index_dim, keep_indexs).clone() - new_pe1 = position_embeddings[1].index_select(index_dim, keep_indexs).clone() - position_embeddings[0].resize_as_(new_pe0).copy_(new_pe0) - position_embeddings[1].resize_as_(new_pe0).copy_(new_pe1) - - return (hidden_states,), kwargs - - if self.special_config['vision_token_length'] is None: - if self.model.__class__.__name__ == 'Llava': - self.model.vlm_model.prepare_inputs_labels_for_multimodal = MethodType( - self.vtoken_length_for_llava_hook( - self.model.vlm_model.prepare_inputs_labels_for_multimodal, - self.pruning_paras - ), self.model.vlm_model - ) - else: - self.model.embed_tokens.register_forward_pre_hook( - functools.partial(vtoken_length_hook, pruning_paras=self.pruning_paras) - ) - - self.blocks[self.pruning_loc - 1].register_forward_pre_hook( - functools.partial(update_output_attentions_hook, pruning_paras=self.pruning_paras), - with_kwargs=True - ) - - self.blocks[self.pruning_loc].register_forward_pre_hook( - functools.partial(fastv_pruning_hook, pruning_paras=self.pruning_paras), - with_kwargs=True - ) diff --git a/llmc/compression/token_reduction/fastvid.py b/llmc/compression/token_reduction/fastvid.py deleted file mode 100644 index 96e60fc9c..000000000 --- a/llmc/compression/token_reduction/fastvid.py +++ /dev/null @@ -1,585 +0,0 @@ -import functools -import math -from types import MethodType - -import torch -import torch.nn as nn -import torch.nn.functional as F -from loguru import logger - -try: - from llava.constants import IMAGE_TOKEN_INDEX - from llava.model.llava_arch import LlavaMetaForCausalLM - from llava.model.multimodal_encoder.siglip_encoder import ( - SigLipVisionConfig, SigLipVisionModel) - from llava.utils import rank0_print -except ImportError: - pass - -from llmc.utils.registry_factory import TOKEN_REDUCTION_REGISTRY - -from .token_reduction_module import TokenReductionModule - - -def head_forward(self, hidden_state): - batch_size = hidden_state.shape[0] - probe = self.probe.repeat(batch_size, 1, 1) - - hidden_state, attn_weights = self.attention(probe, hidden_state, hidden_state) - - residual = hidden_state - hidden_state = self.layernorm(hidden_state) - hidden_state = residual + self.mlp(hidden_state) - - return hidden_state[:, 0], attn_weights - - -class SigLipVisionAbstract(nn.Module): - def __init__(self, vision_tower, vision_tower_cfg, delay_load=False): - super().__init__() - - self.is_loaded = False - - self.config = SigLipVisionConfig() - - self.vision_tower_name = vision_tower - - if not delay_load: - rank0_print(f'Loading vision abstract: {vision_tower}') - self.load_model() - else: - self.cfg_only = self.config - - def load_model(self, device_map=None): - if self.is_loaded: - rank0_print( - '{} is already 
loaded, `load_model` called again, skipping.'.format( - self.vision_tower_name - ) - ) - return - - self.vision_abstract = SigLipVisionModel.from_pretrained( - self.vision_tower_name, device_map=device_map - ) - - del self.vision_abstract.vision_model.embeddings - del self.vision_abstract.vision_model.encoder - - self.vision_abstract.requires_grad_(False) - - self.is_loaded = True - - self.vision_abstract.vision_model.head.__class__.forward = head_forward - - def forward(self, images): - last_hidden_state = self.vision_abstract.vision_model.post_layernorm(images) - pooled_output, attn_weights = self.vision_abstract.vision_model.head( - last_hidden_state - ) - return pooled_output, attn_weights - - @property - def dummy_feature(self): - return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype) - - @property - def dtype(self): - for p in self.vision_tower.parameters(): - return p.dtype - - @property - def device(self): - for p in self.vision_tower.parameters(): - return p.device - - @property - def hidden_size(self): - return self.config.hidden_size - - @property - def num_patches(self): - return (self.config.image_size // self.config.patch_size) ** 2 - - @property - def num_patches_per_side(self): - return self.config.image_size // self.config.patch_size - - @property - def image_size(self): - return self.config.image_size - - -def build_vision_abstract(vision_tower_cfg, **kwargs): - vision_tower = getattr( - vision_tower_cfg, - 'mm_vision_tower', - getattr(vision_tower_cfg, 'vision_tower', None), - ) - - return SigLipVisionAbstract( - vision_tower, vision_tower_cfg=vision_tower_cfg, **kwargs - ) - - -def get_attn_2dPool(frame_attn_weights, stride=2, pruning_paras=None): - height = width = pruning_paras['num_patches_per_side'] - num_frames, _, num_tokens = frame_attn_weights.shape - frame_attn_weights = frame_attn_weights.view( - num_frames, 1, height, width - ).contiguous() - if pruning_paras['mm_spatial_pool_mode'] == 'bilinear': - height, width = frame_attn_weights.shape[-2:] - scaled_shape = [math.ceil(height / stride), math.ceil(width / stride)] - frame_attn_weights = nn.functional.interpolate( - frame_attn_weights, size=scaled_shape, mode='bilinear' - ) - else: - raise ValueError( - f"Unexpected mm_spatial_pool_mode: {pruning_paras['mm_spatial_pool_mode']}" - ) - frame_attn_weights = frame_attn_weights.view(-1) - return frame_attn_weights - - -@TOKEN_REDUCTION_REGISTRY.register('FastVID') -class FastVID(TokenReductionModule): - def __init__(self, config, model, blocks): - super().__init__(config, model, blocks) - self.add_sparse_config() - self.register_reduction_modules() - - def add_sparse_config(self): - special_config = self.config.get('special', {}) - vlm_model = self.model.vlm_model - assert self.model.__class__.__name__ in ('Llava_OneVision',) - if self.model.__class__.__name__ == 'Llava_OneVision': - delay_load = getattr(vlm_model.config, 'delay_load', False) - vision_abstract = build_vision_abstract( - vlm_model.config, delay_load=delay_load - ) - vision_abstract.to(device='cuda', dtype=torch.float16) - special_config['vision_abstract'] = vision_abstract - - special_config['num_patches_per_side'] = ( - vlm_model.get_vision_tower().num_patches_per_side - ) - special_config['mm_spatial_pool_mode'] = vlm_model.config.mm_spatial_pool_mode - self.pruning_paras = special_config - - def register_reduction_modules(self): - - def make_hook_prepare_inputs_labels_for_multimodal(pruning_paras): - def hook_prepare_inputs_labels_for_multimodal( - self, - input_ids, - 
position_ids, - attention_mask, - past_key_values, - labels, - images, - modalities=['image'], - image_sizes=None, - ): - if 'image_token_start_index' not in pruning_paras: - token_indices = input_ids[0][attention_mask[0]] == IMAGE_TOKEN_INDEX - pruning_paras['image_token_start_index'] = torch.where( - token_indices - )[0].item() - return self._original_prepare_inputs_labels_for_multimodal( - input_ids, - position_ids, - attention_mask, - None, - None, - images, - modalities, - image_sizes=image_sizes, - ) - - return hook_prepare_inputs_labels_for_multimodal - - if self.model.__class__.__name__ == 'Llava_OneVision': - hook_fn = make_hook_prepare_inputs_labels_for_multimodal(self.pruning_paras) - self.model.vlm_model._original_prepare_inputs_labels_for_multimodal = ( - self.model.vlm_model.prepare_inputs_labels_for_multimodal - ) - self.model.vlm_model.prepare_inputs_labels_for_multimodal = MethodType( - hook_fn, self.model.vlm_model - ) - - def vision_abstract_hook(module, args, kwargs, pruning_pars): - image_features = args[0] - frame_global_features, frame_attn_weights = pruning_pars['vision_abstract']( - image_features - ) - frame_attn_weights = get_attn_2dPool( - frame_attn_weights, pruning_paras=pruning_pars - ) - - pruning_pars['frame_global_features'] = frame_global_features - pruning_pars['frame_attn_weights'] = frame_attn_weights - - return args, kwargs - - def fastvid_hook(module, args, kwargs, pruning_paras): - hidden_states = args[0] - seq_length = hidden_states.shape[1] - causal_mask = kwargs['attention_mask'] - position_embeddings = kwargs['position_embeddings'] - frame_global_features = pruning_paras['frame_global_features'] - frame_attn_weights = pruning_paras['frame_attn_weights'] - video_start_idx = pruning_paras['image_token_start_index'] - - frame_num = frame_global_features.shape[0] - video_token_len = frame_attn_weights.shape[0] - - # FastVID - if seq_length > 1: - device_type = hidden_states.device - hidden_states_dim = hidden_states.shape[-1] - frame_token_len = video_token_len // frame_num - batchframe_indices = torch.arange( - frame_num, device=device_type - ).unsqueeze(1) - alltoken_indices = ( - torch.arange(video_token_len, device=device_type).view( - frame_num, frame_token_len - ) - + video_start_idx - ) - - video_hidden_states = hidden_states[ - :, video_start_idx: video_start_idx + video_token_len, : - ].squeeze(0) - video_hidden_states = video_hidden_states.reshape( - frame_num, frame_token_len, -1 - ) - frame_attn_weights = frame_attn_weights.reshape( - frame_num, frame_token_len - ) - - # DySeg - # frame_global_features = self.frame_global_features - frame_global_features = ( - frame_global_features - / frame_global_features.norm(dim=1, keepdim=True) - ) - similarity_matrix = ( - frame_global_features[:-1] * frame_global_features[1:] - ).sum(dim=1) - - cut_indices_topk = torch.topk( - similarity_matrix, pruning_paras['DySeg_c'] - 1, largest=False - ).indices - cut_indices_cos = torch.nonzero( - similarity_matrix < pruning_paras['DySeg_tau'], as_tuple=False - ).squeeze(1) - cut_indices = ( - torch.unique(torch.cat([cut_indices_topk, cut_indices_cos])) - .sort() - .values - ) - padded = F.pad(cut_indices, (1, 1), value=-1) - padded[-1] = frame_num - 1 - segment_sizes = padded.diff().tolist() - - # STPrune - keep_indexs = () - keep_indexs += (torch.arange(video_start_idx, device=device_type),) - keep_indexs += ( - torch.arange( - video_start_idx + video_token_len, - seq_length, - device=device_type, - ), - ) - start_tokens = hidden_states[0, 
:video_start_idx, :] - end_tokens = hidden_states[0, video_start_idx + video_token_len:, :] - final_tokens = [start_tokens, end_tokens] - - frame_retain_num = int( - frame_token_len * pruning_paras['retention_ratio'] - ) - - frame_salient_num = frame_retain_num - int( - frame_retain_num * pruning_paras['STPrune_d'] - ) - # frm_salient_num_list = [frame_salient_num] * frame_num - - frm_context_num_list = torch.zeros( - frame_num, dtype=torch.int, device=device_type - ) - frame_context_num = frame_retain_num - frame_salient_num - - # Compute Anchor Token Distribution - offset = 0 - for seg_i_len in segment_sizes: - seg_context_num = frame_context_num * seg_i_len - temp_num = ( - seg_i_len + pruning_paras['DTM_p'] - 1 - ) // pruning_paras['DTM_p'] - cur_frm_context_num = seg_context_num // temp_num - - end = offset + seg_i_len - seg_indices = torch.arange( - seg_i_len - 1, -1, -1, device=device_type - ) - mask = seg_indices % pruning_paras['DTM_p'] == 0 - - frm_context_num_list[offset:end][mask] = cur_frm_context_num - offset = end - - # ATS - salient_indexes = torch.topk( - frame_attn_weights, frame_salient_num, dim=1 - ).indices - - batch_indices = batchframe_indices.expand(-1, frame_salient_num) - salient_tokens = video_hidden_states[batch_indices, salient_indexes] - salient_global_indexes = alltoken_indices[ - batch_indices, salient_indexes - ] - - final_tokens.append(salient_tokens.view(-1, hidden_states_dim)) - keep_indexs += (salient_global_indexes.view(-1),) - - # Parallel Density Score Computation - all_indices = ( - torch.arange(frame_token_len, device=device_type) - .unsqueeze(0) - .expand(frame_num, -1) - ) - all_indices_mask = torch.ones_like(all_indices, dtype=torch.bool) - all_indices_mask.scatter_(1, salient_indexes, False) - filtered_indices = all_indices[all_indices_mask].view( - frame_num, frame_token_len - frame_salient_num - ) - - batch_indices = batchframe_indices.expand( - -1, frame_token_len - frame_salient_num - ) - token_filtered = video_hidden_states[batch_indices, filtered_indices] - alltoken_filtered_indices = alltoken_indices[ - batch_indices, filtered_indices - ] - - tmp_frm_hidden_states = token_filtered - dist_matrix = torch.cdist( - tmp_frm_hidden_states.float(), tmp_frm_hidden_states.float() - ) / (hidden_states_dim**0.5) - - dist_nearest, index_nearest = torch.topk( - dist_matrix, k=4, dim=-1, largest=False - ) - density = (-(dist_nearest**2).mean(dim=-1)).exp() - density = ( - density - + torch.rand(density.shape, device=device_type, dtype=density.dtype) - * 1e-6 - ) - - density_mask = density[:, None, :] > density[:, :, None] - density_mask = density_mask.type(tmp_frm_hidden_states.dtype) - dist_max = dist_matrix.flatten(1).max(dim=-1)[0][:, None, None] - dist_0, index_parent = ( - dist_matrix * density_mask + dist_max * (1 - density_mask) - ).min(dim=-1) - - density_score = dist_0 * density - - sampled_indexs = torch.topk( - density_score, k=frame_context_num, dim=-1 - ).indices - - # DTM for Single-Frame Segment - batch_indices = batchframe_indices.expand(-1, frame_context_num) - frm_context_tokens = token_filtered[batch_indices, sampled_indexs] - frm_context_global_indexes = alltoken_filtered_indices[ - batch_indices, sampled_indexs - ] - - to_be_merge_tokens = token_filtered / token_filtered.norm( - dim=-1, keepdim=True - ) - merge_target_tokens = to_be_merge_tokens[batch_indices, sampled_indexs] - - similarity = torch.bmm( - to_be_merge_tokens, merge_target_tokens.transpose(1, 2) - ) - assign_one_hot = torch.zeros( - frame_num, - frame_token_len - 
frame_salient_num, - frame_context_num, - dtype=token_filtered.dtype, - device=device_type, - ) - assign_one_hot.scatter_(2, similarity.argmax(dim=2).unsqueeze(-1), 1) - - avg_weights = (1 / (assign_one_hot.sum(dim=1).unsqueeze(-1) + 1)).clamp( - min=pruning_paras['DTM_alpha'] - ) - - counts = assign_one_hot.sum(dim=1).clamp(min=1).unsqueeze(-1) - aggregated_hidden = ( - torch.bmm(assign_one_hot.transpose(1, 2), token_filtered) / counts - ) - - frm_context_tokens = ( - avg_weights * frm_context_tokens - + (1 - avg_weights) * aggregated_hidden - ) - - context_for_frame_mask = frm_context_num_list == frame_context_num - # context_for_frame_num = context_for_frame_mask.sum() - - context_for_frame_tokens = frm_context_tokens[context_for_frame_mask] - context_for_frame_global_indexes = frm_context_global_indexes[ - context_for_frame_mask - ] - - final_tokens.append( - context_for_frame_tokens.view(-1, hidden_states_dim) - ) - keep_indexs += (context_for_frame_global_indexes.view(-1),) - - # DTM for Multi-Frame Segment - idx_seg_start = 0 - for seg_i_len in segment_sizes: - if seg_i_len > 1: - cur_seg_context_num_list = frm_context_num_list[ - idx_seg_start: idx_seg_start + seg_i_len - ] - cur_seg_context_num = cur_seg_context_num_list[-1] - - cur_seg_target_mask = ( - cur_seg_context_num_list > frame_context_num - ) - cur_seg_target_num = cur_seg_target_mask.sum() - - cur_seg_density_score = density_score[ - idx_seg_start: idx_seg_start + seg_i_len - ] - cur_seg_density_score = cur_seg_density_score[ - cur_seg_target_mask - ] - - cur_seg_token_filtered = token_filtered[ - idx_seg_start: idx_seg_start + seg_i_len - ] - cur_seg_token_target = cur_seg_token_filtered[ - cur_seg_target_mask - ] - cur_seg_token_filtered = cur_seg_token_filtered.view( - 1, -1, hidden_states_dim - ).expand(cur_seg_target_num, -1, -1) - - cur_seg_alltoken_indices = alltoken_filtered_indices[ - idx_seg_start: idx_seg_start + seg_i_len - ] - cur_seg_alltoken_indices = cur_seg_alltoken_indices[ - cur_seg_target_mask - ] - - sampled_indexs = torch.topk( - cur_seg_density_score, k=cur_seg_context_num, dim=-1 - ).indices - batch_indices = batchframe_indices[:cur_seg_target_num].expand( - -1, cur_seg_context_num - ) - cur_context_tokens = cur_seg_token_target[ - batch_indices, sampled_indexs - ] - cur_context_global_indexes = cur_seg_alltoken_indices[ - batch_indices, sampled_indexs - ] - - to_be_merge_tokens = ( - cur_seg_token_filtered - / cur_seg_token_filtered.norm(dim=-1, keepdim=True) - ) - merge_target_tokens = ( - cur_context_tokens - / cur_context_tokens.norm(dim=-1, keepdim=True) - ) - - similarity = torch.bmm( - to_be_merge_tokens, merge_target_tokens.transpose(1, 2) - ) - assign_one_hot = torch.zeros( - cur_seg_target_num, - to_be_merge_tokens.shape[1], - cur_seg_context_num, - dtype=token_filtered.dtype, - device=device_type, - ) - assign_one_hot.scatter_( - 2, similarity.argmax(dim=2).unsqueeze(-1), 1 - ) - - avg_weights = ( - 1 / (assign_one_hot.sum(dim=1).unsqueeze(-1) + 1) - ).clamp(min=pruning_paras['DTM_alpha']) - - counts = assign_one_hot.sum(dim=1).clamp(min=1).unsqueeze(-1) - aggregated_hidden = ( - torch.bmm( - assign_one_hot.transpose(1, 2), cur_seg_token_filtered - ) - / counts - ) - - cur_context_tokens = ( - avg_weights * cur_context_tokens - + (1 - avg_weights) * aggregated_hidden - ) - - final_tokens.append( - cur_context_tokens.view(-1, hidden_states_dim) - ) - keep_indexs += (cur_context_global_indexes.view(-1),) - - idx_seg_start += seg_i_len - - hidden_states = torch.cat(final_tokens, dim=0) - 
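The two statements surrounding this point follow a pattern that recurs in these pruning hooks: collect the hidden states and global indices of every retained token, restore the original token order by sorting the indices, and rebuild position ids from the kept indices. A minimal standalone sketch of that pattern follows; it is a simplification (it gathers directly from the full hidden states), and the toy shapes and names are assumptions, not part of this file.

import torch

def gather_kept_tokens(hidden_states, keep_chunks):
    # hidden_states: (1, seq_len, dim); keep_chunks: 1-D index tensors of kept positions
    keep_idx = torch.cat(list(keep_chunks)).sort().values   # restore original token order
    pruned = hidden_states[:, keep_idx, :]                   # gather the surviving tokens
    position_ids = keep_idx.unsqueeze(0)                     # positions follow the kept tokens
    return pruned, position_ids

hs = torch.randn(1, 10, 4)
kept, pos = gather_kept_tokens(hs, [torch.arange(0, 3), torch.tensor([7, 5])])
assert kept.shape == (1, 5, 4) and pos.tolist() == [[0, 1, 2, 5, 7]]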
keep_indexs = torch.cat(keep_indexs, dim=0) - - sorted_indexs = torch.argsort(keep_indexs) - hidden_states = hidden_states[sorted_indexs].unsqueeze(0) - keep_indexs = keep_indexs[sorted_indexs] - - if causal_mask is not None: - kwargs['attention_mask'].fill_( - causal_mask[ - :, :, : hidden_states.shape[1], : hidden_states.shape[1] - ] - ) - - with torch.inference_mode(): - kwargs['position_ids'].resize_as_(keep_indexs.unsqueeze(0)).copy_( - keep_indexs.unsqueeze(0).clone() - ) - kwargs['cache_position'].resize_as_(keep_indexs).copy_( - keep_indexs.clone() - ) - - new_pe0 = position_embeddings[0][:, keep_indexs, :].clone() - new_pe1 = position_embeddings[1][:, keep_indexs, :].clone() - position_embeddings[0].resize_as_(new_pe0).copy_(new_pe0) - position_embeddings[1].resize_as_(new_pe0).copy_(new_pe1) - - args[0].resize_as_(hidden_states).copy_(hidden_states.clone()) - - ############################################################## - - return (hidden_states,), kwargs - - self.model.vlm_model.get_model().mm_projector.register_forward_pre_hook( - functools.partial(vision_abstract_hook, pruning_pars=self.pruning_paras), - with_kwargs=True, - ) - - self.blocks[0].register_forward_pre_hook( - functools.partial(fastvid_hook, pruning_paras=self.pruning_paras), - with_kwargs=True, - ) diff --git a/llmc/compression/token_reduction/holitom.py b/llmc/compression/token_reduction/holitom.py deleted file mode 100644 index 27f601c50..000000000 --- a/llmc/compression/token_reduction/holitom.py +++ /dev/null @@ -1,1500 +0,0 @@ -import functools -import math -import random -import re -from abc import ABC -from types import MethodType -from typing import Any, List, Optional, Tuple, Union - -import torch -import torch.nn as nn -import torch.nn.functional as F -from loguru import logger - -try: - from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX - from llava.mm_utils import get_anyres_image_grid_shape - from llava.model.llava_arch import LlavaMetaForCausalLM, unpad_image - from llava.model.multimodal_encoder.siglip_encoder import ( - SigLipEncoder, SigLipVisionTower) - from llava.utils import rank0_print - from transformers.modeling_outputs import (BaseModelOutput, - BaseModelOutputWithPooling) -except ImportError: - pass - -from llmc.utils.registry_factory import TOKEN_REDUCTION_REGISTRY - -from .token_reduction_module import TokenReductionModule - - -def SigLipEncoder_forward( - self, - inputs_embeds, - attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, -): - output_attentions = ( - output_attentions - if output_attentions is not None - else self.config.output_attentions - ) - output_hidden_states = ( - output_hidden_states - if output_hidden_states is not None - else self.config.output_hidden_states - ) - return_dict = ( - return_dict if return_dict is not None else self.config.use_return_dict - ) - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - - hidden_states = inputs_embeds - for encoder_layer in self.layers: - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - if self.gradient_checkpointing and self.training: - layer_outputs = self._gradient_checkpointing_func( - encoder_layer.__call__, - hidden_states, - attention_mask, - output_attentions, - ) - else: - layer_outputs = encoder_layer( - hidden_states, - attention_mask, - output_attentions=output_attentions, - ) - - 
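With output_attentions=True each encoder layer also returns its attention map, and the vision-tower wrapper below (the 'visionzip' / 'holitom' branches) reduces the last layer's map to one saliency score per patch via attn_weights.mean(dim=1).mean(dim=1). A toy, self-contained sketch of that reduction; the sizes are illustrative assumptions only.

import torch

num_heads, seq_len = 12, 729                      # illustrative SigLip-like sizes
attn = torch.softmax(torch.randn(1, num_heads, seq_len, seq_len), dim=-1)
saliency = attn.mean(dim=1).mean(dim=1)           # average heads, then queries -> (1, seq_len)
keep = saliency.topk(64, dim=1).indices           # e.g. keep the 64 most-attended patches
assert saliency.shape == (1, seq_len) and keep.shape == (1, 64)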
hidden_states = layer_outputs[0] - - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - if not return_dict: - return tuple( - v for v in [hidden_states, encoder_states, all_attentions] if v is not None - ) - return BaseModelOutput( - last_hidden_state=hidden_states, - hidden_states=encoder_states, - attentions=all_attentions, - ) - - -def SigLipVisionTower_forward(self, images): - if type(images) is list: - image_features = [] - for image in images: - image_forward_out = self.vision_tower( - image.to(device=self.device, dtype=self.dtype).unsqueeze(0), - output_hidden_states=True, - ) - image_feature = image_forward_out.hidden_states[-1].to(image.dtype) - assert image_features.shape[-2] == 729 - image_features.append(image_feature) - else: - import os - - wrapper = os.environ.get('WRAPPER') - if wrapper in ['visionzip']: - image_forward_outs = self.vision_tower( - images.to(device=self.device, dtype=self.dtype), - output_hidden_states=True, - output_attentions=True, - ) - attn_weights = image_forward_outs.attentions[-1] - hidden_states = image_forward_outs.hidden_states[-1] - metric = self.vision_tower.vision_model.encoder.layers[-1].metric - return ( - hidden_states, - attn_weights.mean(dim=1).mean(dim=1), - metric, - images.dtype, - ) - if wrapper in ['holitom']: - image_forward_outs = self.vision_tower( - images.to(device=self.device, dtype=self.dtype), - output_hidden_states=True, - output_attentions=True, - ) - attn_weights = image_forward_outs.attentions[-1] - hidden_states = image_forward_outs.hidden_states[-1] - return ( - hidden_states, - attn_weights.mean(dim=1).mean(dim=1), - None, - images.dtype, - ) - else: - image_forward_outs = self.vision_tower( - images.to(device=self.device, dtype=self.dtype), - output_hidden_states=True, - ) - image_features = image_forward_outs.hidden_states[-1].to(images.dtype) - assert image_features.shape[-2] == 729 - - return image_features - - -class LlavaMetaForCausalLM_holitom(ABC): - - def encode_images(self, images): - image_features, _ = self.get_model().get_vision_tower()(images) - # image_features = self.get_model().vision_resampler(image_features, images=images) - image_features = self.get_model().mm_projector(image_features) - return image_features - - def encode_images_multi(self, images): - image_features, attn_weights, metric, images_dtype = ( - self.get_model().get_vision_tower()(images) - ) - # image_features = self.get_model().vision_resampler(image_features, images=images) - image_features = self.get_model().mm_projector(image_features) - return image_features, attn_weights, metric, images_dtype - - def cluster_dpc_knn(self, x, cluster_num, k=7): - with torch.no_grad(): - batch_size, seq_len, embed_dim = x.shape - - dist_matrix = torch.cdist(x.float(), x.float()) / ( - embed_dim**0.5 - ) # (batch_size, seq_len, seq_len) - - # get local density - dist_nearest, index_nearest = torch.topk( - dist_matrix, k, dim=-1, largest=False - ) # (batch_size, seq_len, k) - density = (-(dist_nearest**2).mean(dim=-1)).exp() # (batch_size, seq_len) - # add a little noise to ensure no tokens have the same density. 
- density = ( - density - + torch.rand(density.shape, device=density.device, dtype=density.dtype) - * 1e-6 - ) - - # get distance indicator - mask = (density[:, None, :] > density[:, :, None]).type(x.dtype) - dist_max = dist_matrix.flatten(1).max(dim=-1).values[:, None, None] - dist, index_parent = (dist_matrix * mask + dist_max * (1 - mask)).min( - dim=-1 - ) - - # select the cluster center according to the score - score = dist * density - _, index_center = score.topk(cluster_num, dim=-1) - - return index_center, dist_matrix - - def select_static_windows(self, feature_sim, batch_size, tau, max_window_size): - # pruned_static_count[s,e] - pruned_static_count = torch.zeros( - (batch_size, batch_size), device=feature_sim.device - ) - for start in range(0, batch_size): - for end in range(start + 1, batch_size): - static_feature_count = ( - torch.all(feature_sim[start:end, :] > tau, dim=0).sum().item() - ) - pruned_static_count[start, end] = static_feature_count * ( - end - start - ) # window_len = end - start + 1 - - dp = torch.zeros(batch_size, device=pruned_static_count.device) - prev = torch.zeros( - batch_size, dtype=torch.long, device=pruned_static_count.device - ) - # [prev[i], i] - - for i in range(batch_size): - max_val = dp[i - 1] if i > 0 else 0 - best_j = i - - for window_size in range(2, min(i + 1, max_window_size) + 1): - j = i - window_size - current_val = (dp[j] if j >= 0 else 0) + pruned_static_count[ - j + 1, i - ] # [-, j] + [j+1, i] - if current_val > max_val: - max_val = current_val - best_j = j + 1 - - dp[i] = max_val - prev[i] = best_j # [best_j, i] - - selected_frames = [] - i = batch_size - 1 - while i >= 0: - selected_frames.append((prev[i].item(), i)) - i = prev[i].item() - 1 - - selected_frames = selected_frames[::-1] - total_reduced = dp[-1].item() - - return selected_frames, total_reduced - - def merge_tokens_by_clustering( - self, feat, target_indices, dist_matrix, cluster_num, Beta - ): - batch_size, seq_len, embed_dim = feat.shape - all_indices = torch.arange(seq_len, device=feat.device) - all_indices = all_indices.unsqueeze(0).expand( - batch_size, -1 - ) # (batch_size, seq_len) - non_target_indices = torch.zeros( - (batch_size, seq_len - cluster_num), dtype=torch.long, device=feat.device - ) - for b in range(batch_size): - non_target_mask = ~torch.isin(all_indices[b], target_indices[b]) - non_target_indices[b] = all_indices[b][non_target_mask] - # non_target_indices (batch_size, seq_len-cluster_num) - - non_target_feat = torch.gather( - feat, - dim=1, - index=non_target_indices.unsqueeze(-1).expand(-1, -1, feat.size(-1)), - ) # (batch_size, seq_len-cluster_num, embed_dim) - - dist_matrix = torch.gather( - dist_matrix, - dim=1, - index=non_target_indices.unsqueeze(-1).expand(-1, -1, dist_matrix.size(-1)), - ) # (batch_size, seq_len-cluster_num, seq_len) - dist_matrix = torch.gather( - dist_matrix, - dim=2, - index=target_indices.unsqueeze(1).expand(-1, dist_matrix.size(1), -1), - ) # (batch_size, seq_len-cluster_num, cluster_num) - - idx_cluster = torch.argmin( - dist_matrix, dim=-1 - ) # (batch_size, seq_len-cluster_num) - - cluster_tokens = [] - for b in range(batch_size): - batch_tokens = [] - for i in range(cluster_num): - mask = idx_cluster[b] == i - if mask.any(): - cluster_features = non_target_feat[b][mask] - import os - - if os.environ.get('NO_BETA', '0') == '0': - # rank0_print("USE_BETA") - cluster_means = cluster_features.mean(dim=0) - batch_tokens.append( - Beta * feat[b][target_indices[b][i]] - + (1 - Beta) * cluster_means - ) - else: - # 
rank0_print("NO_BETA") - all_features = torch.cat( - [ - feat[b][target_indices[b][i]].unsqueeze(0), - cluster_features, - ], - dim=0, - ) - batch_tokens.append(all_features.mean(dim=0)) - else: - batch_tokens.append(feat[b][target_indices[b][i]]) - cluster_tokens.append(torch.stack(batch_tokens)) - cluster_tokens = torch.stack( - cluster_tokens - ) # shape: (batch_size, cluster_num, embed_dim) - - return cluster_tokens - - def merge_tokens_by_attention_density( - self, feat, attn, pos, retain_ratio, D, Beta, K - ): - batch_size, seq_len, embed_dim = feat.shape - dominant_num = round(math.ceil(seq_len * retain_ratio) * (1 - D)) - contextual_num = math.ceil(seq_len * retain_ratio) - dominant_num - - # Dominant Visual Tokens - if dominant_num > 0: - all_indices = attn.topk(dominant_num, dim=1).indices - mask = torch.ones_like( - feat[:, :, 0], dtype=torch.bool, device=feat.device - ).scatter_( - 1, all_indices, False - ) # (batch_size, seq_len) False means retained tokens - # finally, (batch_size, dominant_num, embed_dim) compare with feat - dominant_tokens = feat.masked_select(~mask.unsqueeze(-1)).view( - batch_size, dominant_num, embed_dim - ) - dominant_pos = pos.masked_select(~mask).view(batch_size, dominant_num) - else: - mask = torch.ones_like(feat[:, :, 0], dtype=torch.bool, device=feat.device) - dominant_tokens = torch.empty( - (batch_size, 0, embed_dim), device=feat.device - ) - dominant_pos = torch.empty((batch_size, 0), device=feat.device) - - # Contextual Visual Tokens - if contextual_num > 0: - # Filter - # feat_filtered: (batch_size, seq_len-dominant_num, embed_dim) - feat_filtered = feat.masked_select(mask.unsqueeze(-1)).view( - batch_size, seq_len - dominant_num, embed_dim - ) - contextual_pos = pos.masked_select(mask.unsqueeze(-1)).view( - batch_size, seq_len - dominant_num - ) - target_indices, dist_matrix = self.cluster_dpc_knn( - feat_filtered, contextual_num, k=min(K, contextual_num) - ) - target_indices = torch.sort(target_indices, dim=-1)[0] - contextual_pos = torch.stack( - [contextual_pos[b][target_indices[b]] for b in range(batch_size)] - ) # (batch_size, contextual_num) - # target_indices (batch_size, contextual_num) - # dist_matrix (batch_size, seq_len-dominant_num, seq_len-dominant_num) - # assign tokens to the nearest center - - contextual_tokens = self.merge_tokens_by_clustering( - feat_filtered, target_indices, dist_matrix, contextual_num, Beta - ) - else: - contextual_tokens = torch.empty( - (batch_size, 0, embed_dim), device=feat.device - ) - contextual_pos = torch.empty((batch_size, 0), device=feat.device) - - image_feat = [] - image_pos = [] - for b in range(batch_size): - batch_tokens = torch.cat([dominant_tokens[b], contextual_tokens[b]], dim=0) - batch_pos = torch.cat([dominant_pos[b], contextual_pos[b]], dim=0) - image_feat.append(batch_tokens) - image_pos.append(batch_pos) - image_feat = torch.stack( - image_feat - ) # shape: (batch_size, dominant_num + contextual_num, embed_dim) - image_pos = torch.stack(image_pos) - - return image_feat, image_pos - - def merge_tokens_by_density(self, feat, pos, retain_ratio, Beta, K): - batch_size, seq_len, embed_dim = feat.shape - cluster_num = round(seq_len * retain_ratio) - if cluster_num > 0: - target_indices, dist_matrix = self.cluster_dpc_knn( - feat, cluster_num, k=min(K, cluster_num) - ) - target_indices = torch.sort(target_indices, dim=-1)[0] - image_pos = torch.stack( - [pos[b][target_indices[b]] for b in range(batch_size)] - ) - - cluster_tokens = self.merge_tokens_by_clustering( - feat, target_indices, 
dist_matrix, cluster_num, Beta - ) - image_feat = cluster_tokens - else: - image_feat = torch.empty((batch_size, 0, embed_dim), device=feat.device) - image_pos = torch.empty((batch_size, 0), device=feat.device) - - return image_feat, image_pos - - def add_newline_token(self, feat, pos, grid_size, newline_token): - row_pos = pos // grid_size - expanded_feat_list = [] - for cur_feat, cur_row_pos in zip(feat, row_pos): - expanded_feat = [] - for row in range(grid_size): - find_row_feat = cur_feat[cur_row_pos == row] - if len(find_row_feat) > 0: - expanded_feat.append( - torch.cat((find_row_feat, newline_token), dim=0) - ) - else: - expanded_feat.append(find_row_feat) - batch_feat = torch.cat(expanded_feat, dim=0) - expanded_feat_list.append(batch_feat) - - image_feat = torch.cat(expanded_feat_list, dim=0) - return image_feat - - def holitom( - self, - static_feat, - dynamic_feat, - dynamic_attn, - static_pos, - dynamic_pos, - window_size, - retain_ratio, - D, - Beta, - K, - images_dtype, - mm_newline_position, - ): - newline_token = ( - self.model.image_newline[None].to(static_feat.device) - if mm_newline_position == 'grid' - else None - ) - grid_size = int(math.sqrt(dynamic_feat.shape[1] + static_feat.shape[0])) - - if window_size == 1: - dynamic_feat, dynamic_pos = self.merge_tokens_by_attention_density( - dynamic_feat, dynamic_attn, dynamic_pos, retain_ratio, D, Beta, K - ) - if mm_newline_position != 'grid': - feat = dynamic_feat.flatten(0, 1) - else: - dynamic_pos, sorted_indices = torch.sort(dynamic_pos, dim=1) - dynamic_feat = torch.gather( - dynamic_feat, - 1, - sorted_indices.unsqueeze(-1).expand(-1, -1, dynamic_feat.shape[-1]), - ) - dynamic_feat = self.add_newline_token( - dynamic_feat, dynamic_pos, grid_size, newline_token - ) - - feat = dynamic_feat - - return feat.to(images_dtype) - else: - dynamic_feat, dynamic_pos = self.merge_tokens_by_attention_density( - dynamic_feat, dynamic_attn, dynamic_pos, retain_ratio, D, Beta, K - ) - static_feat, static_pos = self.merge_tokens_by_density( - static_feat.unsqueeze(0), static_pos, retain_ratio, Beta, K - ) - if mm_newline_position != 'grid': - feat = torch.cat( - [static_feat.flatten(0, 1), dynamic_feat.flatten(0, 1)] - ) - else: - first_dynamic_feat = dynamic_feat[0:1, :] - first_dynamic_pos = dynamic_pos[0:1, :] - first_feat = torch.cat( - [static_feat, first_dynamic_feat], dim=1 - ) # (batch_size, first_frame_tokens, embed_dim) - first_pos = torch.cat([static_pos, first_dynamic_pos], dim=1) - - # Sort tokens by their original positions - first_pos, first_sorted_indices = torch.sort(first_pos, dim=1) - first_feat = torch.gather( - first_feat, - 1, - first_sorted_indices.unsqueeze(-1).expand( - -1, -1, first_feat.shape[-1] - ), - ) - - first_feat = self.add_newline_token( - first_feat, first_pos, grid_size, newline_token - ) - - other_feat = dynamic_feat[1:, :] - other_pos = dynamic_pos[1:, :] - other_pos, other_sorted_indices = torch.sort(other_pos, dim=1) - other_feat = torch.gather( - other_feat, - 1, - other_sorted_indices.unsqueeze(-1).expand( - -1, -1, other_feat.shape[-1] - ), - ) - other_feat = self.add_newline_token( - other_feat, other_pos, grid_size, newline_token - ) - - feat = torch.cat([first_feat, other_feat]) - - return feat.to(images_dtype) - - def get_static_dynamic_features( - self, image_feat, attn_weights, selected_frames, feature_sim, tau - ): - # attn_weights: (batch_size, seq_len) - batch_size, seq_len, embed_dim = image_feat.shape - static_feat_list, dynamic_feat_list, _, dynamic_attn_list = [], [], [], [] - 
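get_static_dynamic_features classifies each spatial token within a frame window as static (its frame-to-frame cosine similarity stays above tau across the whole window, so one averaged copy suffices) or dynamic (kept per frame). The deleted method additionally tracks attentions and token positions; the standalone sketch below shows only the split itself, with assumed toy shapes.

import torch
import torch.nn.functional as F

def split_static_dynamic(feat, tau):
    # feat: (num_frames, seq_len, dim) for one window with at least two frames
    sim = F.cosine_similarity(feat[:-1], feat[1:], dim=-1)   # (num_frames-1, seq_len)
    static_mask = torch.all(sim > tau, dim=0)                # stable across every adjacent pair
    static_feat = feat[:, static_mask].mean(dim=0)           # one averaged copy per static token
    dynamic_feat = feat[:, ~static_mask]                     # per-frame copies of the rest
    return static_feat, dynamic_feat, static_mask

feat = torch.randn(4, 8, 16)
static_feat, dynamic_feat, mask = split_static_dynamic(feat, tau=0.9)
assert static_feat.shape[0] + dynamic_feat.shape[1] == 8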
static_pos_list, dynamic_pos_list = [], [] - for start, end in selected_frames: - all_indices = torch.arange(seq_len, device=image_feat.device).unsqueeze( - 0 - ) # (1, seq_len) - if start == end: - static_feat_list.append( - torch.empty((0, embed_dim), device=image_feat.device) - ) - # static_attn_list.append(torch.empty((0,), device=attn_weights.device)) - dynamic_feat_list.append(image_feat[start: end + 1]) - dynamic_attn_list.append(attn_weights[start: end + 1]) - - static_pos_list.append( - torch.empty((0, seq_len), device=image_feat.device) - ) - dynamic_pos_list.append(all_indices) - else: - windows_size = end - start + 1 - mask = torch.all(feature_sim[start:end, :] > tau, dim=0) - static_feat = image_feat[start: end + 1, mask] - # static_attn = attn_weights[start:end+1, mask] - dynamic_feat = image_feat[start: end + 1, ~mask] - dynamic_attn = attn_weights[start: end + 1, ~mask] - - static_feat_list.append(static_feat.mean(dim=0)) - # static_attn_list.append(static_attn.mean(dim=0)) - dynamic_feat_list.append(dynamic_feat) - dynamic_attn_list.append(dynamic_attn) - - static_pos_list.append(all_indices[:, mask].expand(1, -1)) - dynamic_pos_list.append(all_indices[:, ~mask].expand(windows_size, -1)) - - return ( - static_feat_list, - dynamic_feat_list, - _, - dynamic_attn_list, - static_pos_list, - dynamic_pos_list, - ) - - def prepare_inputs_labels_for_multimodal( - self, - input_ids, - position_ids, - attention_mask, - past_key_values, - labels, - images, - modalities=['image'], - image_sizes=None, - ): - import os - - vision_tower = self.get_vision_tower() - # rank_print(modalities) - if vision_tower is None or images is None or input_ids.shape[1] == 1: - return ( - input_ids, - position_ids, - attention_mask, - past_key_values, - None, - labels, - ) - - if isinstance(modalities, str): - modalities = [modalities] - - # import pdb; pdb.set_trace() - if type(images) is list or images.ndim == 5: - mm_patch_merge_type = getattr(self.config, 'mm_patch_merge_type', 'flat') - image_aspect_ratio = getattr(self.config, 'image_aspect_ratio', 'square') - mm_newline_position = getattr( - self.config, 'mm_newline_position', 'one_token' - ) - - if type(images) is list: - images = [x.unsqueeze(0) if x.ndim == 3 else x for x in images] - - video_idx_in_batch = [] - for _ in range(len(modalities)): - if modalities[_] == 'video': - video_idx_in_batch.append(_) - - images_list = [] - for image in images: - if image.ndim == 4: - images_list.append(image) - else: - images_list.append(image.unsqueeze(0)) - - concat_images = torch.cat([image for image in images_list], dim=0) - split_sizes = [image.shape[0] for image in images_list] - encoded_image_features, attn_weights, _, images_dtype = ( - self.encode_images_multi(concat_images) - ) - retain_ratio = self.pruning_paras.get('RETAIN_RATIO', 0.1) - # C = int(os.environ.get("C", 8)) - # tau = float(os.environ.get("T", 0.8)) - tau = self.pruning_paras.get('T', 0.1) - # P = int(os.environ.get("P", 4)) - Beta = float(os.environ.get('BETA', 0.6)) - D = float(os.environ.get('D', 0)) - K = int(os.environ.get('K', 7)) - max_window_size = int(os.environ.get('MAX_WINDOW_SIZE', 1024)) - # NO_BETA = os.environ.get('NO_BETA', '1') - # rank0_print(f"retain_ratio: {retain_ratio}, - # tau: {tau}, Beta: {Beta}, D: {D}, K: {K}, - # max_window_size: {max_window_size}, NO_BETA: {NO_BETA}") - # image_features,all_faster_video_features = - # self.encode_multimodals(concat_images, video_idx_in_batch, split_sizes) - - # This is a list, each element is [num_images, patch * patch, 
dim] - # rank_print(f"Concat images : {concat_images.shape}") - encoded_image_features = torch.split(encoded_image_features, split_sizes) - image_features = [] - for idx, image_feat in enumerate(encoded_image_features): - if idx in video_idx_in_batch: - # [modify] - # image_features.append(self.get_2dPool(image_feat)) - # image_feat: (batch_size, seq_len, embed_dim) - # attn_weights: (batch_size, seq_len) - pooled_image_feat = self.get_2dPool( - image_feat - ) # (batch_size, seq_len', embed_dim) - attn_weights = attn_weights.unsqueeze(-1) - attn_weights = self.get_2dPool(attn_weights) - attn_weights = attn_weights.squeeze(-1) # (batch_size, seq_len') - - batch_size, seq_len, embed_dim = pooled_image_feat.shape - - pooled_image_feat_normed = torch.nn.functional.normalize( - pooled_image_feat, p=2, dim=-1 - ) - feature_sim = torch.nn.functional.cosine_similarity( - pooled_image_feat_normed[:-1], - pooled_image_feat_normed[1:], - dim=-1, - ) # (batch_size-1, seq_len') - - selected_frames, total_reduced = self.select_static_windows( - feature_sim, batch_size, tau, max_window_size - ) - # rank0_print(f"Selected frames: {selected_frames}") - # rank0_print(f"Total reduced features: {total_reduced}") - - total_tokens = batch_size * seq_len - retain_ratio = min( - retain_ratio / ((total_tokens - total_reduced) / total_tokens), - 1, - ) - # rank0_print(f"After static pruning, retain ratio: {retain_ratio}") - - ( - static_feat, - dynamic_feat, - _, - dynamic_attn, - static_pos, - dynamic_pos, - ) = self.get_static_dynamic_features( - pooled_image_feat, - attn_weights, - selected_frames, - feature_sim, - tau, - ) - - segment_features = [] - for idx, (start, end) in enumerate(selected_frames): - window_size = end - start + 1 - segment_features.append( - self.holitom( - static_feat[idx], - dynamic_feat[idx], - dynamic_attn[idx], - static_pos[idx], - dynamic_pos[idx], - window_size, - retain_ratio, - D, - Beta, - K, - images_dtype, - mm_newline_position, - ) - ) - image_features.append(torch.cat(segment_features, dim=0)) - - else: - image_features.append(image_feat) - # image_features = - # self.encode_multimodals(concat_images, video_idx_in_batch, split_sizes) - # rank_print(f"Encoded image feats : {[x.shape for x in image_features]}") - # image_features = torch.split(image_features, split_sizes, dim=0) - - if mm_patch_merge_type == 'flat': - image_features = [x.flatten(0, 1) for x in image_features] - - elif mm_patch_merge_type.startswith('spatial'): - new_image_features = [] - for image_idx, image_feature in enumerate(image_features): - # FIXME: now assume the image is square, and split to 2x2 patches - # num_patches = h * w, where h = w = sqrt(num_patches) - # currently image_feature is a tensor of shape (4, num_patches, hidden_size) - # we want to first unflatten it to (2, 2, h, w, hidden_size) - # rank0_print("At least we are reaching here") - # import pdb; pdb.set_trace() - if image_idx in video_idx_in_batch: # video operations - # rank0_print("Video") - if mm_newline_position == 'grid': - new_image_features.append(image_feature) - elif mm_newline_position == 'frame': - # Frame-wise - image_feature = self.add_token_per_frame(image_feature) - - new_image_features.append(image_feature.flatten(0, 1)) - - elif mm_newline_position == 'one_token': - # one-token - # image_feature = image_feature.flatten(0, 1) - if 'unpad' in mm_patch_merge_type: - image_feature = torch.cat( - ( - image_feature, - self.model.image_newline[None].to( - image_feature.device - ), - ), - dim=0, - ) - 
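The grid / one_token branches in this block splice a learned image_newline embedding into the visual sequence; add_newline_token earlier in this file does it per grid row while respecting the positions of pruned tokens. A simplified full-grid sketch of the per-row idea, where the function name and toy sizes are assumptions rather than part of the deleted code.

import torch

def add_row_newlines(tokens, grid_size, newline):
    # tokens: (grid_size*grid_size, dim) in row-major order; newline: (1, dim) learned embedding
    rows = tokens.view(grid_size, grid_size, -1)                        # (g, g, dim)
    rows = torch.cat([rows, newline.expand(grid_size, 1, -1)], dim=1)   # newline after each row
    return rows.flatten(0, 1)                                           # (g*(g+1), dim)

out = add_row_newlines(torch.randn(9, 4), 3, torch.zeros(1, 4))
assert out.shape == (12, 4)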
new_image_features.append(image_feature) - elif mm_newline_position == 'no_token': - new_image_features.append(image_feature.flatten(0, 1)) - else: - raise ValueError( - f'Unexpected mm_newline_position: {mm_newline_position}' - ) - elif ( - image_feature.shape[0] > 1 - ): # multi patches and multi images operations - # rank0_print("Single-images") - base_image_feature = image_feature[0] - image_feature = image_feature[1:] - height = width = self.get_vision_tower().num_patches_per_side - assert height * width == base_image_feature.shape[0] - - if 'anyres_max' in image_aspect_ratio: - matched_anyres_max_num_patches = re.match( - r'anyres_max_(\d+)', image_aspect_ratio - ) - if matched_anyres_max_num_patches: - max_num_patches = int( - matched_anyres_max_num_patches.group(1) - ) - - if ( - image_aspect_ratio == 'anyres' - or 'anyres_max' in image_aspect_ratio - ): - if hasattr(self.get_vision_tower(), 'image_size'): - vision_tower_image_size = ( - self.get_vision_tower().image_size - ) - else: - raise ValueError( - 'vision_tower_image_size is not found in the vision tower.' - ) - try: - num_patch_width, num_patch_height = ( - get_anyres_image_grid_shape( - image_sizes[image_idx], - self.config.image_grid_pinpoints, - vision_tower_image_size, - ) - ) - except Exception as e: - rank0_print(f'Error: {e}') - num_patch_width, num_patch_height = 2, 2 - image_feature = image_feature.view( - num_patch_height, num_patch_width, height, width, -1 - ) - else: - image_feature = image_feature.view(2, 2, height, width, -1) - - if 'maxpool2x2' in mm_patch_merge_type: - image_feature = image_feature.permute( - 4, 0, 2, 1, 3 - ).contiguous() - image_feature = image_feature.flatten(1, 2).flatten(2, 3) - image_feature = nn.functional.max_pool2d(image_feature, 2) - image_feature = image_feature.flatten(1, 2).transpose(0, 1) - elif ( - 'unpad' in mm_patch_merge_type - and 'anyres_max' in image_aspect_ratio - and matched_anyres_max_num_patches - ): - unit = image_feature.shape[2] - image_feature = image_feature.permute( - 4, 0, 2, 1, 3 - ).contiguous() - image_feature = image_feature.flatten(1, 2).flatten(2, 3) - image_feature = unpad_image( - image_feature, image_sizes[image_idx] - ) - c, h, w = image_feature.shape - times = math.sqrt(h * w / (max_num_patches * unit**2)) - if times > 1.1: - image_feature = image_feature[None] - image_feature = nn.functional.interpolate( - image_feature, - [int(h // times), int(w // times)], - mode='bilinear', - )[0] - image_feature = torch.cat( - ( - image_feature, - self.model.image_newline[:, None, None] - .expand(*image_feature.shape[:-1], 1) - .to(image_feature.device), - ), - dim=-1, - ) - image_feature = image_feature.flatten(1, 2).transpose(0, 1) - elif 'unpad' in mm_patch_merge_type: - image_feature = image_feature.permute( - 4, 0, 2, 1, 3 - ).contiguous() - image_feature = image_feature.flatten(1, 2).flatten(2, 3) - image_feature = unpad_image( - image_feature, image_sizes[image_idx] - ) - image_feature = torch.cat( - ( - image_feature, - self.model.image_newline[:, None, None] - .expand(*image_feature.shape[:-1], 1) - .to(image_feature.device), - ), - dim=-1, - ) - image_feature = image_feature.flatten(1, 2).transpose(0, 1) - else: - image_feature = image_feature.permute( - 0, 2, 1, 3, 4 - ).contiguous() - image_feature = image_feature.flatten(0, 3) - if 'nobase' in mm_patch_merge_type: - pass - else: - image_feature = torch.cat( - (base_image_feature, image_feature), dim=0 - ) - new_image_features.append(image_feature) - else: # single image operations - image_feature 
= image_feature[0] - if 'unpad' in mm_patch_merge_type: - image_feature = torch.cat( - (image_feature, self.model.image_newline[None]), dim=0 - ) - - new_image_features.append(image_feature) - image_features = new_image_features - else: - raise ValueError( - f'Unexpected mm_patch_merge_type: {self.config.mm_patch_merge_type}' - ) - else: - image_features = self.encode_images(images) - - # TODO: image start / end is not implemented here to support pretraining. - if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr( - self.config, 'mm_use_im_start_end', False - ): - raise NotImplementedError - # rank_print(f"Total images : {len(image_features)}") - - # Let's just add dummy tensors if they do not exist, - # it is a headache to deal with None all the time. - # But it is not ideal, and if you have a better idea, - # please open an issue / submit a PR, thanks. - _labels = labels - _position_ids = position_ids - _attention_mask = attention_mask - if attention_mask is None: - attention_mask = torch.ones_like(input_ids, dtype=torch.bool) - else: - attention_mask = attention_mask.bool() - if position_ids is None: - position_ids = torch.arange( - 0, input_ids.shape[1], dtype=torch.long, device=input_ids.device - ) - if labels is None: - labels = torch.full_like(input_ids, IGNORE_INDEX) - - # remove the padding using attention_mask -- FIXME - # _input_ids = input_ids - input_ids = [ - cur_input_ids[cur_attention_mask] - for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask) - ] - labels = [ - cur_labels[cur_attention_mask] - for cur_labels, cur_attention_mask in zip(labels, attention_mask) - ] - - new_input_embeds = [] - new_labels = [] - if ( - self.pruning_paras.get('HOLITOM_k', None) is not None - and self.pruning_paras.get('HOLITOM_r', None) is not None - ): - # [modified] - image_token_posi = [] - prompt_len = [] - cur_image_idx = 0 - # rank_print("Inserting Images embedding") - for batch_idx, cur_input_ids in enumerate(input_ids): - if ( - self.pruning_paras.get('HOLITOM_k', None) is not None - and self.pruning_paras.get('HOLITOM_r', None) is not None - ): - # [modified] - # record image position for further dropping - image_index = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[ - 0 - ].tolist() - if image_index == []: - image_token_posi.append(-1) - else: - image_token_posi.append(image_index[0]) - - # record input instruction length in inference mode - if not self.training: - if image_index == []: - prompt_len.append(cur_input_ids.shape[0]) - else: - prompt_len.append( - cur_input_ids.shape[0] - 1 - ) # consider image place holder - - num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum() - # rank0_print(num_images) - if num_images == 0: - cur_image_features = image_features[cur_image_idx] - cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids) - cur_input_embeds = torch.cat( - [cur_input_embeds_1, cur_image_features[0:0]], dim=0 - ) - new_input_embeds.append(cur_input_embeds) - new_labels.append(labels[batch_idx]) - cur_image_idx += 1 - continue - - image_token_indices = ( - [-1] - + torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() - + [cur_input_ids.shape[0]] - ) - cur_input_ids_noim = [] - cur_labels = labels[batch_idx] - cur_labels_noim = [] - for i in range(len(image_token_indices) - 1): - cur_input_ids_noim.append( - cur_input_ids[ - image_token_indices[i] + 1: image_token_indices[i + 1] - ] - ) - cur_labels_noim.append( - cur_labels[image_token_indices[i] + 1: image_token_indices[i + 1]] - ) - # [modify] - # text_token_count = 
sum([x.shape[0] for x in cur_labels_noim]) - # vision_token_count = len(image_features[cur_image_idx]) - # rank0_print(f"Batch {batch_idx}: - # Text tokens: {text_token_count} Original Vision tokens: {vision_token_count}") - - split_sizes = [x.shape[0] for x in cur_labels_noim] - cur_input_embeds = self.get_model().embed_tokens( - torch.cat(cur_input_ids_noim) - ) - cur_input_embeds_no_im = torch.split(cur_input_embeds, split_sizes, dim=0) - cur_new_input_embeds = [] - cur_new_labels = [] - - for i in range(num_images + 1): - cur_new_input_embeds.append(cur_input_embeds_no_im[i]) - cur_new_labels.append(cur_labels_noim[i]) - if i < num_images: - try: - cur_image_features = image_features[cur_image_idx] - except IndexError: - cur_image_features = image_features[cur_image_idx - 1] - cur_image_idx += 1 - cur_new_input_embeds.append(cur_image_features) - cur_new_labels.append( - torch.full( - (cur_image_features.shape[0],), - IGNORE_INDEX, - device=cur_labels.device, - dtype=cur_labels.dtype, - ) - ) - - cur_new_input_embeds = [x.to(self.device) for x in cur_new_input_embeds] - - # import pdb; pdb.set_trace() - cur_new_input_embeds = torch.cat(cur_new_input_embeds) - cur_new_labels = torch.cat(cur_new_labels) - - new_input_embeds.append(cur_new_input_embeds) - new_labels.append(cur_new_labels) - - if ( - self.pruning_paras.get('HOLITOM_k', None) is not None - and self.pruning_paras.get('HOLITOM_r', None) is not None - ): - # [modified] - self.model.image_token_posi = image_token_posi - self.model.prompt_len = prompt_len - self.model.image_tokens = [ - image_feature.shape[0] for image_feature in image_features - ] - - # Truncate sequences to max length as image embeddings can make the sequence longer - tokenizer_model_max_length = getattr( - self.config, 'tokenizer_model_max_length', None - ) - # rank_print("Finishing Inserting") - - new_input_embeds = [ - x[:tokenizer_model_max_length] - for x, modality in zip(new_input_embeds, modalities) - ] - new_labels = [ - x[:tokenizer_model_max_length] - for x, modality in zip(new_labels, modalities) - ] - - # Combine them - max_len = max(x.shape[0] for x in new_input_embeds) - batch_size = len(new_input_embeds) - - new_input_embeds_padded = [] - new_labels_padded = torch.full( - (batch_size, max_len), - IGNORE_INDEX, - dtype=new_labels[0].dtype, - device=new_labels[0].device, - ) - attention_mask = torch.zeros( - (batch_size, max_len), - dtype=attention_mask.dtype, - device=attention_mask.device, - ) - position_ids = torch.zeros( - (batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device - ) - # rank0_print("Prepare pos id") - - for i, (cur_new_embed, cur_new_labels) in enumerate( - zip(new_input_embeds, new_labels) - ): - cur_len = cur_new_embed.shape[0] - if getattr(self.config, 'tokenizer_padding_side', 'right') == 'left': - new_input_embeds_padded.append( - torch.cat( - ( - torch.zeros( - (max_len - cur_len, cur_new_embed.shape[1]), - dtype=cur_new_embed.dtype, - device=cur_new_embed.device, - ), - cur_new_embed, - ), - dim=0, - ) - ) - if cur_len > 0: - new_labels_padded[i, -cur_len:] = cur_new_labels - attention_mask[i, -cur_len:] = True - position_ids[i, -cur_len:] = torch.arange( - 0, cur_len, dtype=position_ids.dtype, device=position_ids.device - ) - else: - new_input_embeds_padded.append( - torch.cat( - ( - cur_new_embed, - torch.zeros( - (max_len - cur_len, cur_new_embed.shape[1]), - dtype=cur_new_embed.dtype, - device=cur_new_embed.device, - ), - ), - dim=0, - ) - ) - if cur_len > 0: - new_labels_padded[i, :cur_len] = 
cur_new_labels - attention_mask[i, :cur_len] = True - position_ids[i, :cur_len] = torch.arange( - 0, cur_len, dtype=position_ids.dtype, device=position_ids.device - ) - - new_input_embeds = torch.stack(new_input_embeds_padded, dim=0) - # rank0_print("tokenizer padding") - - if _labels is None: - new_labels = None - else: - new_labels = new_labels_padded - - if _attention_mask is None: - attention_mask = None - else: - attention_mask = attention_mask.to(dtype=_attention_mask.dtype) - - if _position_ids is None: - position_ids = None - if getattr(self.config, 'use_pos_skipping', False) and self.training: - position_ids = ( - torch.arange(new_input_embeds.size(1), device=new_input_embeds.device) - .unsqueeze(0) - .to(new_input_embeds.device) - ) - split_position = random.randint(0, new_input_embeds.size(1)) - left_add = random.randint(0, self.config.pos_skipping_range) - right_add = random.randint(left_add, self.config.pos_skipping_range) - position_ids[:, :split_position] += left_add - position_ids[:, split_position:] += right_add - # import pdb; pdb.set_trace() - # rank0_print("Finish preparing") - return ( - None, - position_ids, - attention_mask, - past_key_values, - new_input_embeds, - new_labels, - ) - - -@TOKEN_REDUCTION_REGISTRY.register('HoliTom') -class HoliTom(TokenReductionModule): - def __init__(self, config, model, blocks): - super().__init__(config, model, blocks) - self.add_sparse_config() - self.register_reduction_modules() - - def add_sparse_config(self): - special_config = self.config.get('special', {}) - self.model.model.pruning_paras = special_config - self.model.model.model.pruning_paras = special_config - - if self.model.__class__.__name__ == 'Llava_OneVision': - SigLipEncoder.forward = SigLipEncoder_forward - SigLipVisionTower.forward = SigLipVisionTower_forward - - LlavaMetaForCausalLM.prepare_inputs_labels_for_multimodal = ( - LlavaMetaForCausalLM_holitom.prepare_inputs_labels_for_multimodal - ) - LlavaMetaForCausalLM.encode_images = ( - LlavaMetaForCausalLM_holitom.encode_images - ) - LlavaMetaForCausalLM.encode_images_multi = ( - LlavaMetaForCausalLM_holitom.encode_images_multi - ) - - LlavaMetaForCausalLM.holitom = LlavaMetaForCausalLM_holitom.holitom - LlavaMetaForCausalLM.cluster_dpc_knn = ( - LlavaMetaForCausalLM_holitom.cluster_dpc_knn - ) - LlavaMetaForCausalLM.select_static_windows = ( - LlavaMetaForCausalLM_holitom.select_static_windows - ) - LlavaMetaForCausalLM.get_static_dynamic_features = ( - LlavaMetaForCausalLM_holitom.get_static_dynamic_features - ) - LlavaMetaForCausalLM.merge_tokens_by_attention_density = ( - LlavaMetaForCausalLM_holitom.merge_tokens_by_attention_density - ) - LlavaMetaForCausalLM.merge_tokens_by_density = ( - LlavaMetaForCausalLM_holitom.merge_tokens_by_density - ) - LlavaMetaForCausalLM.merge_tokens_by_clustering = ( - LlavaMetaForCausalLM_holitom.merge_tokens_by_clustering - ) - LlavaMetaForCausalLM.add_newline_token = ( - LlavaMetaForCausalLM_holitom.add_newline_token - ) - - if ( - self.special_config.get('HOLITOM_k', None) is not None - and self.special_config.get('HOLITOM_r', None) is not None - ): - from functools import partial - - from transformers.cache_utils import Cache, DynamicCache - from transformers.modeling_flash_attention_utils import \ - FlashAttentionKwargs - from transformers.modeling_outputs import \ - BaseModelOutputWithPast - from transformers.processing_utils import Unpack - - def qwen_forward( - self, - input_ids: Optional[torch.LongTensor] = None, - attention_mask: Optional[torch.Tensor] = None, - 
position_ids: Optional[torch.LongTensor] = None, - past_key_values: Optional[Cache] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - cache_position: Optional[torch.LongTensor] = None, - **flash_attn_kwargs: Unpack[FlashAttentionKwargs], - ) -> BaseModelOutputWithPast: - output_attentions = ( - output_attentions - if output_attentions is not None - else self.config.output_attentions - ) - output_hidden_states = ( - output_hidden_states - if output_hidden_states is not None - else self.config.output_hidden_states - ) - use_cache = ( - use_cache if use_cache is not None else self.config.use_cache - ) - - if (input_ids is None) ^ (inputs_embeds is not None): - raise ValueError( - 'You must specify exactly one of input_ids or inputs_embeds' - ) - - if self.gradient_checkpointing and self.training and use_cache: - logger.warning_once( - '`use_cache=True` is incompatible with gradient checkpointing.' + - 'Setting `use_cache=False`.' - ) - use_cache = False - - # TODO (joao): remove this exception in v4.56 -- - # it exists for users that try to pass a legacy cache - if not isinstance(past_key_values, (type(None), Cache)): - raise ValueError( - 'The `past_key_values` should be either a `Cache` object or `None`.' - ) - - if inputs_embeds is None: - inputs_embeds = self.embed_tokens(input_ids) - - if use_cache and past_key_values is None: - past_key_values = DynamicCache() - - if cache_position is None: - past_seen_tokens = ( - past_key_values.get_seq_length() - if past_key_values is not None - else 0 - ) - cache_position = torch.arange( - past_seen_tokens, - past_seen_tokens + inputs_embeds.shape[1], - device=inputs_embeds.device, - ) - - if position_ids is None: - position_ids = cache_position.unsqueeze(0) - - causal_mask = self._update_causal_mask( - attention_mask, - inputs_embeds, - cache_position, - past_key_values, - output_attentions, - ) - - hidden_states = inputs_embeds - - # create position embeddings to be shared across the decoder layers - position_embeddings = self.rotary_emb(hidden_states, position_ids) - - # decoder layers - all_hidden_states = () if output_hidden_states else None - all_self_attns = () if output_attentions else None - - HOLITOM_k = self.pruning_paras.get('HOLITOM_k', 3) - HOLITOM_r = self.pruning_paras.get('HOLITOM_r', 0.5) - HOLITOM_image_token_start_index = self.image_token_posi[0] - HOLITOM_image_token_length = self.image_tokens[0] - seq_length_with_past = past_seen_tokens + inputs_embeds.shape[1] - - for layer_idx, decoder_layer in enumerate( - self.layers[: self.config.num_hidden_layers] - ): - if output_hidden_states: - all_hidden_states += (hidden_states,) - - if self.gradient_checkpointing and self.training: - layer_outputs = self._gradient_checkpointing_func( - partial(decoder_layer.__call__, **flash_attn_kwargs), - hidden_states, - causal_mask, - position_ids, - past_key_values, - output_attentions, - use_cache, - cache_position, - position_embeddings, - ) - else: - if layer_idx < HOLITOM_k: - pass - elif layer_idx == HOLITOM_k and position_ids.size(1) > 1: - # compute pruned tokens, generate fastv sign - last_layer_attention = layer_outputs[1] - # compute average attention over different head - last_layer_attention_avg = torch.mean( - last_layer_attention, dim=1 - )[0] - # generate new attention mask based on the average attention, - # sample the top ATTENTION_RANK tokens with highest attention - 
last_layer_attention_avg_last_tok = ( - last_layer_attention_avg[-1] - ) - # get the attention in image token - last_layer_attention_avg_last_tok_image = \ - last_layer_attention_avg_last_tok[ - HOLITOM_image_token_start_index: - HOLITOM_image_token_start_index - + HOLITOM_image_token_length - ] - # get the indexes of the top ATTENTION_RANK tokens - top_attention_rank_index = ( - last_layer_attention_avg_last_tok_image.topk( - round( - HOLITOM_image_token_length * (1 - HOLITOM_r) - ) - ).indices - + HOLITOM_image_token_start_index - ) - # print("Before merge:", HOLITOM_image_token_length, "After merge:", - # round(HOLITOM_image_token_length*(1-HOLITOM_r))) - - device = hidden_states.device - # [modified] - all_indices = torch.arange( - HOLITOM_image_token_length, device=device - ) - non_topk_mask = ~torch.isin( - all_indices, - top_attention_rank_index - - HOLITOM_image_token_start_index, - ) - non_topk_indices = ( - all_indices[non_topk_mask] - + HOLITOM_image_token_start_index - ) - non_topk_states = hidden_states[ - :, non_topk_indices, : - ] # [batch_size, len(non_topk), hidden_size] - topk_states = hidden_states[ - :, top_attention_rank_index, : - ] # [batch_size, len(topk), hidden_size] - non_topk_norm = torch.norm( - non_topk_states, dim=-1, keepdim=True - ) # [batch_size, len(non_topk), 1] - topk_norm = torch.norm( - topk_states, dim=-1, keepdim=True - ) # [batch_size, len(topk), 1] - dot_product = torch.bmm( - non_topk_states, topk_states.transpose(1, 2) - ) # [batch_size, len(non_topk), len(topk)] - sim_matrix = dot_product / ( - non_topk_norm * topk_norm.transpose(1, 2) - ) - sim_max, sim_max_index = torch.max(sim_matrix, dim=-1) - - for b in range(hidden_states.size(0)): - for i in range(len(non_topk_indices)): - non_topk_idx = non_topk_indices[i] - most_similar_topk_idx = ( - top_attention_rank_index[ - sim_max_index[b, i] - ] - ) - hidden_states[b, most_similar_topk_idx, :] = ( - hidden_states[b, most_similar_topk_idx, :] - + hidden_states[b, non_topk_idx, :] - ) / 2 - # [modified] - - # keep index - keep_indexes = torch.cat( - ( - torch.arange( - HOLITOM_image_token_start_index, - device=device, - ), - top_attention_rank_index, - torch.arange( - HOLITOM_image_token_start_index - + HOLITOM_image_token_length, - seq_length_with_past, - device=device, - ), - ) - ) - # sort index - keep_indexes = keep_indexes.sort().values - # update seq length - new_seq_length = keep_indexes.shape[0] - # filter hidden states - - hidden_states = hidden_states[ - :, keep_indexes, : - ] - # lead the cuda error in the - # second iteration of decoding layeridx 3 - # update position ids - position_ids = keep_indexes.unsqueeze(0) - - position_embeddings = self.rotary_emb( - hidden_states, position_ids - ) - - cache_position = cache_position[:new_seq_length] - - if layer_idx == HOLITOM_k - 1: - output_attentions = True - else: - output_attentions = False - - layer_outputs = decoder_layer( - hidden_states, - attention_mask=causal_mask, - position_ids=position_ids, - past_key_value=past_key_values, - output_attentions=output_attentions, - use_cache=use_cache, - cache_position=cache_position, - position_embeddings=position_embeddings, - **flash_attn_kwargs, - ) - - hidden_states = layer_outputs[0] - - # if output_attentions: - # all_self_attns += (layer_outputs[1],) - - hidden_states = self.norm(hidden_states) - - # add hidden states from the last decoder layer - if output_hidden_states: - all_hidden_states += (hidden_states,) - - return BaseModelOutputWithPast( - last_hidden_state=hidden_states, - 
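# The HOLITOM_k branch above keeps the image tokens that receive the most attention from the
# last token and folds every dropped token into its most similar kept token by averaging.
# A compact, self-contained sketch of that prune-and-merge step on dummy tensors (function
# name and shapes are assumptions made for illustration):
import torch

def prune_and_merge(hidden_states, attn_last_tok, img_start, img_len, keep_ratio):
    """hidden_states: [B, L, D]; attn_last_tok: [L] attention the last token pays to each position."""
    device = hidden_states.device
    keep = round(img_len * keep_ratio)
    topk = attn_last_tok[img_start:img_start + img_len].topk(keep).indices + img_start
    all_img = torch.arange(img_len, device=device) + img_start
    dropped = all_img[~torch.isin(all_img, topk)]

    kept_states = hidden_states[:, topk, :]                               # [B, keep, D]
    drop_states = hidden_states[:, dropped, :]                            # [B, n_drop, D]
    sim = torch.nn.functional.cosine_similarity(
        drop_states.unsqueeze(2), kept_states.unsqueeze(1), dim=-1)       # [B, n_drop, keep]
    nearest = sim.argmax(dim=-1)

    for b in range(hidden_states.size(0)):
        for i, d in enumerate(dropped):                                   # same pairwise average as above
            k = topk[nearest[b, i]]
            hidden_states[b, k] = (hidden_states[b, k] + hidden_states[b, d]) / 2

    keep_idx = torch.cat((torch.arange(img_start, device=device),
                          topk,
                          torch.arange(img_start + img_len, hidden_states.size(1), device=device)))
    keep_idx = keep_idx.sort().values
    return hidden_states[:, keep_idx, :], keep_idx

x, attn = torch.randn(1, 40, 16), torch.rand(40)
pruned, kept = prune_and_merge(x, attn, img_start=8, img_len=20, keep_ratio=0.5)
print(pruned.shape, kept.shape)    # torch.Size([1, 30, 16]) torch.Size([30])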
past_key_values=past_key_values if use_cache else None, - hidden_states=all_hidden_states, - attentions=all_self_attns, - ) - - from transformers.models.qwen2.modeling_qwen2 import Qwen2Model - - Qwen2Model.forward = qwen_forward - - def register_reduction_modules(self): - pass diff --git a/llmc/compression/token_reduction/mustdrop.py b/llmc/compression/token_reduction/mustdrop.py deleted file mode 100644 index 97c63942a..000000000 --- a/llmc/compression/token_reduction/mustdrop.py +++ /dev/null @@ -1,235 +0,0 @@ -import functools -import math -from types import MethodType -from typing import Callable, Tuple - -import torch -import torch.nn.functional as F -from einops import rearrange - -from llmc.utils.registry_factory import TOKEN_REDUCTION_REGISTRY - -from .token_reduction_module import TokenReductionModule -from .utils import prepare_inputs_labels_for_multimodal_with_index_masks - - -@TOKEN_REDUCTION_REGISTRY.register('MustDrop') -class MustDrop(TokenReductionModule): - def __init__(self, config, model, blocks): - super().__init__(config, model, blocks) - self.add_sparse_config() - self.register_reduction_modules() - - def add_sparse_config(self): - self.pruning_loc = self.model.pruning_config.get('select_layer', -1) - self.pruning_paras = self.special_config - - def register_reduction_modules(self): - - def conditional_pooling( - feat: torch.Tensor, - threshold: float, - window_size: Tuple[int, int], - fix_r: int = 0, - ) -> Tuple[Callable, Callable]: - - with torch.no_grad(): - - ws_h, ws_w = int(window_size[0]), int(window_size[1]) # 窗口尺寸,2*2 - stride_h, stride_w = ws_h, ws_w - num_token_window = stride_h * stride_w # 窗口内token数量,4 - - _, feat = ( - feat[:, :1, :], - feat[:, 1:, :], - ) # 取出cls token之外的所有tokens,一共576个vision token - B, N, D = feat.size() - base_grid_H = int(math.sqrt(N)) - base_grid_W = base_grid_H - assert ( - base_grid_H * base_grid_W == N - and base_grid_H % ws_h == 0 - and base_grid_W % ws_w == 0 - ) - - feat = rearrange(feat, 'b (h w) c -> b c h w', h=base_grid_H) - - feat = rearrange( - feat, - 'b c (gh ps_h) (gw ps_w) -> b gh gw c ps_h ps_w', - gh=base_grid_H // ws_h, - gw=base_grid_W // ws_w, - ) - b, gh, gw, c, ps_h, ps_w = feat.shape - - # Flatten mxm window for pairwise operations - tensor_flattened = feat.reshape(b, gh, gw, c, -1) - - # Expand dims for pairwise operations - tensor_1 = tensor_flattened.unsqueeze(-1) - tensor_2 = tensor_flattened.unsqueeze(-2) - - # Compute cosine similarities - sims = F.cosine_similarity(tensor_1, tensor_2, dim=3) - - # Exclude the self-similarity (i.e., similarity with oneself will be 1) - sims_mask = 1 - torch.eye(ps_h * ps_w).to(sims.device) - sims = sims * sims_mask - - # Average similarities (excluding the self-similarity) - similarity_map = sims.sum(-1).sum(-1) / ( - (ps_h * ps_w) * (ps_h * ps_w - 1) - ) - - similarity_map = rearrange( - similarity_map.unsqueeze(1), 'b c h w-> b (c h w)' - ) - - # --- adaptive section ---# - - n_B, n_H = similarity_map.shape - node_mean = torch.tensor(threshold).cuda(sims.device) - node_mean = node_mean.repeat(1, n_H) - r = torch.ge(similarity_map, node_mean).sum(dim=1).min() - # -------------# - if fix_r != 0: - r = fix_r - # get top k similar super patches - _, sim_super_patch_idxs = similarity_map.topk(r, dim=-1) - - # --- creating the mergabel and unmergable super patches - tensor = ( - torch.arange(base_grid_H * base_grid_W) - .reshape(base_grid_H, base_grid_W) - .to(feat.device) - ) - - # Repeat the tensor to create a batch of size 2 - tensor = tensor.unsqueeze(0).repeat(B, 1, 
1) - - # Apply unfold operation on last two dimensions to create the sliding window - windowed_tensor = tensor.unfold(1, ws_h, stride_h).unfold( - 2, ws_w, stride_w - ) - - # Reshape the tensor to the desired shape - windowed_tensor = windowed_tensor.reshape(B, -1, num_token_window) - - # Use torch.gather to collect the desired elements - gathered_tensor = torch.gather( - windowed_tensor, - 1, - sim_super_patch_idxs.unsqueeze(-1).expand(-1, -1, num_token_window), - ) - - # Create a mask for all indices, for each batch - mask = torch.ones((B, windowed_tensor.shape[1]), dtype=bool).to( - feat.device - ) - - # Create a tensor that matches the shape of indices and fill it with False - mask_values = torch.zeros_like( - sim_super_patch_idxs, dtype=torch.bool - ).to(feat.device) - - # Use scatter_ to update the mask. - # This will set mask[b, indices[b]] = False for all b - mask.scatter_(1, sim_super_patch_idxs, mask_values) - - # Get the remaining tensor - remaining_tensor = windowed_tensor[ - mask.unsqueeze(-1).expand(-1, -1, num_token_window) - ].reshape(B, -1, num_token_window) - unm_idx = ( - remaining_tensor.reshape(B, -1).sort(dim=-1).values.unsqueeze(-1) - ) - dim_index = (num_token_window) - 1 - src_idx = gathered_tensor[:, :, :dim_index].reshape(B, -1).unsqueeze(-1) - dst_idx = gathered_tensor[:, :, dim_index].reshape(B, -1).unsqueeze(-1) - merge_idx = ( - torch.arange(src_idx.shape[1] // dim_index) - .repeat_interleave(dim_index) - .repeat(B, 1) - .unsqueeze(-1) - .to(feat.device) - ) - - def merge(x: torch.Tensor, mode='mean') -> torch.Tensor: - # TODO: num_token_window can be undefined - - x_cls, x_feat = x[:, :1, :], x[:, 1:, :] - n, t1, c = x_feat.shape - src = x_feat.gather(dim=-2, index=src_idx.expand(n, r * dim_index, c)) - dst = x_feat.gather(dim=-2, index=dst_idx.expand(n, r, c)) - unm = x_feat.gather( - dim=-2, index=unm_idx.expand(n, t1 - (r * num_token_window), c) - ) - dst = dst.scatter_reduce( - -2, merge_idx.expand(n, r * dim_index, c), src, reduce=mode - ) - x = torch.cat([dst, unm], dim=1) - x = torch.cat((x_cls, x), dim=1) - - index_masks = torch.zeros((n, t1), dtype=torch.bool, device=x_feat.device) - dst_flat = dst_idx.view(n, -1) - unm_flat = unm_idx.view(n, -1) - index_masks.scatter_(1, dst_flat, True) - index_masks.scatter_(1, unm_flat, True) - - return x, index_masks - - return merge - - def merge_wavg( - merge: Callable, x: torch.Tensor, size: torch.Tensor = None - ) -> Tuple[torch.Tensor, torch.Tensor]: - - if size is None: - size = torch.ones_like(x[..., 0, None]) - - x, index_masks = merge(x * size, mode='sum') - size, _ = merge(size, mode='sum') - x = x / size - - return x, size, index_masks - - def spatial_merge_hook(module, inps, outs, pruning_paras, llava_next): - spatial_threshold = pruning_paras['spatial_threshold'] - window_size = pruning_paras['window_size'] - hidden_states = outs[0] - vtoken_length = hidden_states.shape[1] - fix_r = 0 - if pruning_paras.get('retained_tokens', None) is not None: - retained_tokens = pruning_paras['retained_tokens'] - fix_r = (vtoken_length - retained_tokens) \ - // (window_size[0] * window_size[1] - 1) - merge = conditional_pooling(hidden_states, spatial_threshold, window_size, fix_r) - hidden_states, size, index_masks = merge_wavg(merge, hidden_states, None) - - if not llava_next: - return (hidden_states,) - - pruning_paras['index_masks'] = index_masks - return outs - - def update_index_masks_hook(module, inps, outs, pruning_paras): - module.index_masks = pruning_paras['index_masks'] - - 
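# conditional_pooling above scores each ws_h x ws_w window of vision tokens by the mean pairwise
# cosine similarity of the tokens inside it; windows above the threshold are later collapsed into
# one token. A standalone sketch of just that scoring step (grid layout and names are assumed):
import torch
import torch.nn.functional as F
from einops import rearrange

def window_similarity(feat, window=(2, 2)):
    """feat: [B, N, C] vision tokens on a square grid; returns [B, num_windows] redundancy scores."""
    B, N, C = feat.shape
    side = int(N ** 0.5)
    ws_h, ws_w = window
    feat = rearrange(feat, 'b (h w) c -> b c h w', h=side)
    feat = rearrange(feat, 'b c (gh ph) (gw pw) -> b (gh gw) (ph pw) c', ph=ws_h, pw=ws_w)
    sims = F.cosine_similarity(feat.unsqueeze(3), feat.unsqueeze(2), dim=-1)   # [B, W, k, k]
    k = ws_h * ws_w
    off_diag = sims.sum(dim=(-1, -2)) - k        # remove the k self-similarities (each equals 1)
    return off_diag / (k * (k - 1))              # mean over ordered pairs inside the window

tokens = torch.randn(1, 576, 64)                 # e.g. a 24x24 grid of vision tokens
scores = window_similarity(tokens)
print(scores.shape)                              # torch.Size([1, 144])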
self.blocks[self.pruning_loc].register_forward_hook( - functools.partial( - spatial_merge_hook, - pruning_paras=self.pruning_paras, - llava_next=self.special_config['vision_token_length'] is None - ), - ) - - if self.special_config['vision_token_length'] is None: - - self.model.vlm_model.prepare_inputs_labels_for_multimodal = MethodType( - prepare_inputs_labels_for_multimodal_with_index_masks, - self.model.vlm_model - ) - - self.model.vision_model.register_forward_hook( - functools.partial(update_index_masks_hook, pruning_paras=self.pruning_paras), - ) diff --git a/llmc/compression/token_reduction/prunevid.py b/llmc/compression/token_reduction/prunevid.py deleted file mode 100644 index 290822dc8..000000000 --- a/llmc/compression/token_reduction/prunevid.py +++ /dev/null @@ -1,411 +0,0 @@ -import functools -from typing import List, Optional, Tuple, Union - -import torch -import torch.nn.functional as F -from loguru import logger -from torch import einsum - -try: - from llava.model.llava_arch import LlavaMetaForCausalLM -except ImportError: - pass - -from llmc.utils.registry_factory import TOKEN_REDUCTION_REGISTRY - -from .token_reduction_module import TokenReductionModule -from .utils import add_post_hook_to_get_2dPool - - -def index_points(points, idx): - """Sample features following the index. - Returns: - new_points:, indexed points data, [B, S, C] - - Args: - points: input points data, [B, N, C] - idx: sample index data, [B, S] - """ - device = points.device - B = points.shape[0] - view_shape = list(idx.shape) - view_shape[1:] = [1] * (len(view_shape) - 1) - repeat_shape = list(idx.shape) - repeat_shape[0] = 1 - batch_indices = ( - torch.arange(B, dtype=torch.long) - .to(device) - .view(view_shape) - .repeat(repeat_shape) - ) - new_points = points[batch_indices, idx, :] - return new_points - - -def cluster_dpc_knn(x, cluster_num, k=5, token_mask=None): - """Cluster tokens with DPC-KNN algorithm. - - Return: - idx_cluster (Tensor[B, N]): cluster index of each token. - cluster_num (int): actual cluster number. The same with - input cluster number - Args: - x: input token feature, [B, N, C] - cluster_num (int): cluster number - k (int): number of the nearest neighbor used for local density. - token_mask (Tensor[B, N]): mask indicate the whether the token is - padded empty token. Non-zero value means the token is meaningful, - zero value means the token is an empty token. If set to None, all - tokens are regarded as meaningful. - """ - with torch.no_grad(): - B, N, C = x.shape - - dist_matrix = torch.cdist(x.float(), x.float()) / (C**0.5) - - if token_mask is not None: - token_mask = token_mask > 0 - # in order to not affect the local density, the distance between empty tokens - # and any other tokens should be the maximal distance. - dist_matrix = dist_matrix * token_mask[:, None, :] + ( - dist_matrix.max() + 1 - ) * (~token_mask[:, None, :]) - - # get local density - - dist_nearest, index_nearest = torch.topk( - dist_matrix, k=k, dim=-1, largest=False - ) - density = (-(dist_nearest**2).mean(dim=-1)).exp() - # add a little noise to ensure no tokens have the same density. 
- density = ( - density - + torch.rand(density.shape, device=density.device, dtype=density.dtype) - * 1e-6 - ) - - if token_mask is not None: - # the density of empty token should be 0 - density = density * token_mask - - # get distance indicator - mask = density[:, None, :] > density[:, :, None] - mask = mask.type(x.dtype) - dist_max = dist_matrix.flatten(1).max(dim=-1)[0][:, None, None] - dist, index_parent = (dist_matrix * mask + dist_max * (1 - mask)).min(dim=-1) - - # select clustering center according to score - score = dist * density - _, index_down = torch.topk(score, k=cluster_num, dim=-1) - - # # assign tokens to the nearest center - dist_matrix = index_points(dist_matrix, index_down) - - idx_cluster = dist_matrix.argmin(dim=1) - - # make sure cluster center merge to itself - idx_batch = torch.arange(B, device=x.device)[:, None].expand(B, cluster_num) - idx_tmp = torch.arange(cluster_num, device=x.device)[None, :].expand( - B, cluster_num - ) - idx_cluster[idx_batch.reshape(-1), index_down.reshape(-1)] = idx_tmp.reshape(-1) - return idx_cluster, cluster_num - - -def refine_clusters(cluster_idx): - """根据给定的聚类结果,对每个批次进行精炼处理。 - - Args: - cluster_idx: Tensor of shape (B, N),每个元素是聚类的索引。 - - Returns: - refined_cluster_idx: Tensor of shape (B, N),精炼后的聚类结果。 - """ - import torch - - B, N = cluster_idx.shape - refined_cluster_idx = cluster_idx.clone() - for b in range(B): - clusters = torch.unique(cluster_idx[b]) - segment_info = {} - # 步骤1:对于每个 cluster,找到其所有的连续片段 - for cluster_label in clusters: - indices = (cluster_idx[b] == cluster_label).nonzero(as_tuple=True)[0] - if indices.numel() == 0: - continue - # 找到连续片段 - segments = [] - start = indices[0].item() - prev = indices[0].item() - for idx in indices[1:]: - idx = idx.item() - if idx == prev + 1: - prev = idx - else: - # 新的片段 - segments.append((start, prev)) - start = idx - prev = idx - # 添加最后一个片段 - segments.append((start, prev)) - segment_info[cluster_label.item()] = segments - - # 步骤2:保留每个 cluster 中最长的片段,其余片段需要重新归类 - for cluster_label, segments in segment_info.items(): - # 找到最长的片段长度 - max_length = 0 - for start, end in segments: - length = end - start + 1 - if length > max_length: - max_length = length - # 如果最长的片段长度为1,且只有长度为1的片段,该 cluster 需要移除 - if max_length == 1: - for start, end in segments: - refined_cluster_idx[b, start: end + 1] = -1 # -1表示需要重新归类 - continue - # 保留最长的片段,重新归类其他片段 - for start, end in segments: - length = end - start + 1 - if length == max_length: - continue # 保留最长的片段 - else: - refined_cluster_idx[b, start: end + 1] = -1 # 需要重新归类 - - # 步骤3:对于需要重新归类的片段,按照左右邻居最长的片段的 cluster 进行归类 - idx = 0 - while idx < N: - if refined_cluster_idx[b, idx] == -1: - # 找到需要重新归类的片段 - start = idx - while idx < N and refined_cluster_idx[b, idx] == -1: - idx += 1 - end = idx - 1 - # 找到左侧和右侧的邻居 cluster 及其片段长度 - left_cluster_label = None - left_length = 0 - if start > 0: - left_label = refined_cluster_idx[b, start - 1].item() - # 左侧片段长度 - l_idx = start - 1 - while l_idx >= 0 and refined_cluster_idx[b, l_idx] == left_label: - l_idx -= 1 - left_length = start - l_idx - 1 - left_cluster_label = left_label - right_cluster_label = None - right_length = 0 - if end < N - 1: - right_label = refined_cluster_idx[b, end + 1].item() - # 右侧片段长度 - r_idx = end + 1 - while r_idx < N and refined_cluster_idx[b, r_idx] == right_label: - r_idx += 1 - right_length = r_idx - end - 1 - right_cluster_label = right_label - # 选择片段长度较长的邻居 cluster 进行归类,若长度相同,选择左侧 - if left_length > right_length: - new_label = left_cluster_label - elif right_length > left_length: - 
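# cluster_dpc_knn above implements DPC-KNN: local density from the k nearest neighbours,
# separation as the distance to the nearest higher-density point, centres chosen by
# density * separation, and every token assigned to its nearest centre. A toy, self-contained
# version of the same idea (single batch, names assumed):
import torch

def dpc_knn(x, n_clusters, k=5):
    """x: [N, C] token features. Returns a cluster label per token and the centre indices."""
    N, C = x.shape
    dist = torch.cdist(x, x) / (C ** 0.5)
    knn_dist, _ = dist.topk(k, largest=False)                     # distances to the k nearest points
    density = (-(knn_dist ** 2).mean(dim=-1)).exp()
    density = density + torch.rand_like(density) * 1e-6           # break ties
    higher = density[None, :] > density[:, None]
    dist_to_higher = torch.where(higher, dist, dist.max() + 1).min(dim=-1).values
    dist_to_higher[density.argmax()] = dist.max()                 # global peak gets the max distance
    centres = (density * dist_to_higher).topk(n_clusters).indices
    return dist[:, centres].argmin(dim=-1), centres

points = torch.cat([torch.randn(20, 8), torch.randn(20, 8) + 5.0])   # two well-separated blobs
labels, centres = dpc_knn(points, n_clusters=2)
print(labels.shape, centres)    # torch.Size([40]) and the two centre indices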
new_label = right_cluster_label - else: - new_label = ( - left_cluster_label - if left_cluster_label is not None - else right_cluster_label - ) - # 如果左右邻居都不存在,默认归类为 cluster 0 - if new_label is None: - new_label = 0 - # 重新归类 - refined_cluster_idx[b, start: end + 1] = new_label - else: - idx += 1 - return refined_cluster_idx - - -def segment_lengths(tensor): - # 获取设备信息(CPU 或 GPU) - device = tensor.device - B, N = tensor.shape - - # 列表用于存储每个视频的段长度 - segment_lengths_list = [] - max_segments = 0 # 记录最大段数 - - for i in range(B): - seq = tensor[i] - # 计算值发生变化的位置 - change_points = torch.where(seq[1:] != seq[:-1])[0] + 1 - # 包含起始和结束位置 - boundaries = torch.cat( - [ - torch.tensor([0], device=device), - change_points, - torch.tensor([N], device=device), - ] - ) - # 计算每个段的长度 - lengths = boundaries[1:] - boundaries[:-1] - segment_lengths_list.append(lengths) - max_segments = max(max_segments, lengths.numel()) - - # 初始化结果张量,填充为0 - result = torch.zeros((B, max_segments), dtype=torch.long, device=device) - # 将每个视频的段长度填入结果张量 - for i in range(B): - lengths = segment_lengths_list[i] - result[i, : lengths.numel()] = lengths - - return result - - -def compute_cluster_vectors(image_key_vectors, cluster_key_idx, num_cluster): - """ - Args: - image_key_vectors: Tensor of shape (B, L, D), the feature vectors - cluster_key_idx: Tensor of shape (B, L), cluster indices for each vector - num_cluster: int, the total number of clusters - - Returns: - cluster_vectors: Tensor of shape (B, num_cluster, D), the averaged features for each cluster - """ - # image_key_vectors: (B, L, D) - # cluster_key_idx: (B, L) - # num_cluster: integer, number of clusters - - B, L, D = image_key_vectors.shape - - # Step 1: 将cluster_key_idx进行one-hot编码 - # 得到的cluster_key_idx_onehot形状为 (B, L, num_cluster) - cluster_key_idx_onehot = F.one_hot(cluster_key_idx, num_classes=num_cluster).to( - dtype=image_key_vectors.dtype - ) - - # Step 2: 计算每个cluster的特征和 - # 首先调整cluster_key_idx_onehot的维度,使其变为 (B, num_cluster, L) - cluster_key_idx_onehot_t = cluster_key_idx_onehot.permute(0, 2, 1) - - # 然后通过矩阵乘法计算每个cluster的特征和,得到的cluster_sums形状为 (B, num_cluster, D) - cluster_sums = torch.bmm(cluster_key_idx_onehot_t, image_key_vectors) - - # Step 3: 计算每个cluster的元素数量 - # cluster_counts形状为 (B, num_cluster) - cluster_counts = cluster_key_idx_onehot.sum(dim=1) - - # Step 4: 计算每个cluster的平均特征 - # 先避免除以0,将cluster_counts中为0的值替换为1 - cluster_counts_nonzero = cluster_counts.clone() - cluster_counts_nonzero[cluster_counts_nonzero == 0] = 1 - - # 计算平均值,结果cluster_features形状为 (B, num_cluster, D) - cluster_features = cluster_sums / cluster_counts_nonzero.unsqueeze(-1) - - # Step 5: 对于没有元素的cluster,将其特征设置为0 - zero_mask = (cluster_counts == 0).unsqueeze(-1) # (B, num_cluster, 1) - cluster_features = cluster_features.masked_fill(zero_mask, 0) - - return cluster_features # (B, num_cluster, D) - - -def spatial_merge_tokens(feature, num_cluster, k): - cluster_idx, _ = cluster_dpc_knn(feature, cluster_num=num_cluster, k=k) - feature = compute_cluster_vectors(feature, cluster_idx, num_cluster=num_cluster) - return feature - - -def merge_frames_dynamic(frames, pruning_paras, k=7): - # B, L, C = frames.shape - B = 1 - num_frames, L, C = frames.shape - threshold = pruning_paras['taus'] - cluster_ratio = pruning_paras['cluster_ratios'] - temporal_segment_ratio = pruning_paras['temporal_segment_ratios'] - frames = frames.view(B, num_frames, L, C) # B T L C - idx_clusters, _ = cluster_dpc_knn( - frames.mean(dim=2), cluster_num=int(num_frames * temporal_segment_ratio), k=k - ) - idx_clusters = 
refine_clusters(idx_clusters) - window_list = segment_lengths(idx_clusters) - - static_features = [] - dynamic_features = [] - static_sizes = [] - dynamic_sizes = [] - - start_idx = 0 - for window_size in window_list[0]: # 假设window_list的形状为(B, S) - # 获取当前window的帧 - current_frames = frames[:, start_idx: start_idx + window_size, :, :] # B W L C - - # 计算相似度 - frames_normed = F.normalize(current_frames, p=2, dim=-1) - frames_sim = einsum('b w l c, b t l c -> b w t l', frames_normed, frames_normed) - frames_sim = (frames_sim.sum(dim=-2) - 1).sum(dim=-2) / ( - window_size * (window_size - 1) - ) # B L - - # 创建mask - mask = frames_sim > threshold - mask_expand = mask.view(B, 1, L, 1).expand(-1, window_size, -1, C) # B W L C - - # 处理静态特征 - static_mask = mask_expand - static_feat = ( - torch.masked_select(current_frames, static_mask) - .view(B, window_size, -1, C) - .mean(dim=1) - ) - if static_feat.shape[1] > 14: - static_feat = spatial_merge_tokens( - static_feat, num_cluster=int(static_feat.shape[1] * cluster_ratio), k=7 - ) - static_features.append(static_feat) - static_sizes.append(static_feat.shape[1]) - - # 处理动态特征 - dynamic_mask = ~mask_expand - dynamic_feat = torch.masked_select(current_frames, dynamic_mask).view( - B, window_size, -1, C - ) - dynamic_window_list = [] - for i in range(window_size): - dynamic_feat_window = dynamic_feat[:, i, :, :] - if dynamic_feat_window.shape[1] > 14: - dynamic_feat_window = spatial_merge_tokens( - dynamic_feat_window, - num_cluster=int(dynamic_feat_window.shape[1] * cluster_ratio), - k=7, - ) - dynamic_window_list.append(dynamic_feat_window) - dynamic_feat = torch.cat(dynamic_window_list, dim=1) - # dynamic_feat = torch.masked_select(current_frames, dynamic_mask).view(B, -1, C) - - dynamic_features.append(dynamic_feat) - dynamic_sizes.append(dynamic_feat.shape[1]) - - start_idx += window_size - - # 合并所有特征 - final_features = [] - for static_feature, dynamic_feature in zip(static_features, dynamic_features): - final_features.append(static_feature) - final_features.append(dynamic_feature) - final_features = torch.cat(final_features, dim=1) - - # window_sizes = window_list[0].tolist() # 转换为列表形式 - - return final_features - # return final_features, static_sizes, dynamic_sizes, window_sizes - - -@TOKEN_REDUCTION_REGISTRY.register('PruneVid') -class PruneVid(TokenReductionModule): - def __init__(self, config, model, blocks): - super().__init__(config, model, blocks) - self.register_reduction_modules() - - def register_reduction_modules(self): - - if isinstance(self.model.model, LlavaMetaForCausalLM): - add_post_hook_to_get_2dPool( - self.model.model, merge_frames_dynamic, self.special_config - ) diff --git a/llmc/compression/token_reduction/pyramiddrop.py b/llmc/compression/token_reduction/pyramiddrop.py deleted file mode 100644 index aa7a63042..000000000 --- a/llmc/compression/token_reduction/pyramiddrop.py +++ /dev/null @@ -1,410 +0,0 @@ -import functools -import math -from functools import wraps -from types import MethodType - -import torch -from torch import nn -from transformers.modeling_attn_mask_utils import \ - _prepare_4d_causal_attention_mask -from transformers.models.llama.modeling_llama import apply_rotary_pos_emb - -from llmc.utils.registry_factory import TOKEN_REDUCTION_REGISTRY - -from .token_reduction_module import TokenReductionModule -from .utils import prefill_wrapper - - -@TOKEN_REDUCTION_REGISTRY.register('PyramidDrop') -class PyramidDrop(TokenReductionModule): - def __init__(self, config, model, blocks): - super().__init__(config, model, 
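# merge_frames_dynamic above splits each temporal segment into "static" tokens (highly similar
# across the frames of the segment, averaged over time) and "dynamic" tokens (kept per frame).
# A minimal sketch of that split for one segment (threshold and shapes are assumptions):
import torch
import torch.nn.functional as F

def split_static_dynamic(frames, tau=0.8):
    """frames: [T, L, C] tokens of one temporal segment."""
    T, L, C = frames.shape
    normed = F.normalize(frames, p=2, dim=-1)
    sim = torch.einsum('tlc,slc->tsl', normed, normed)        # frame-to-frame similarity per token
    per_token = (sim.sum(dim=(0, 1)) - T) / (T * (T - 1))     # mean over ordered frame pairs
    static_mask = per_token > tau
    static = frames[:, static_mask, :].mean(dim=0)            # [n_static, C], averaged over time
    dynamic = frames[:, ~static_mask, :].reshape(-1, C)       # [T * n_dynamic, C]
    return static, dynamic

base = torch.randn(1, 196, 32).repeat(4, 1, 1)                # content identical in every frame
noise = torch.randn(4, 196, 32)                               # content that changes per frame
segment = torch.cat([base[:, :98], noise[:, 98:]], dim=1)
static, dynamic = split_static_dynamic(segment, tau=0.8)
print(static.shape, dynamic.shape)    # torch.Size([98, 32]) torch.Size([392, 32])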
blocks) - self.add_sparse_config() - self.register_reduction_modules() - - def add_sparse_config(self): - - self.pruning_loc = self.special_config['layer_list'] - self.special_config['IMAGE_TOKEN_INDEX'] = \ - self.model.pruning_config['IMAGE_TOKEN_INDEX'] - - image_token_ratio_list = self.special_config['image_token_ratio_list'] - image_token_ratio_list.insert(0, 1.0) - self.special_config['image_token_ratio_list'] = image_token_ratio_list - if self.model.__class__.__name__ == 'LlavaHf': - llama_model = self.model.vlm_model.language_model.model - elif self.model.__class__.__name__ == 'Llava': - llama_model = self.model.vlm_model.model - self.special_config['tokenizer_padding_side'] = getattr( - llama_model.config, - 'tokenizer_padding_side', - 'right', - ) - - self.pruning_paras = self.special_config - - def register_reduction_modules(self): - @prefill_wrapper - def pruning_hook(module, args, kwargs, pruning_pars, cur_num, layer_idx): - - if layer_idx == self.pruning_loc[0]: - position_ids = kwargs['position_ids'] - attention_mask = kwargs['attention_mask'] - position_embeddings = kwargs['position_embeddings'] - else: - attention_mask = pruning_pars['attention_mask'] - position_ids = pruning_pars['position_ids'] - position_embeddings = pruning_pars['position_embeddings'] - - features = args[0] - _position_ids = position_ids - _attention_mask = attention_mask - prompt_len = pruning_pars['prompt_len'] - image_tokens_list = pruning_pars['image_tokens'] - image_token_posi = pruning_pars['image_token_posi'] - image_token_ratio_list = pruning_pars['image_token_ratio_list'] - - # for decoding stage - if features.shape[1] == 1: - return args, kwargs - - if position_ids is None: - position_ids = torch.arange( - 0, features.shape[1], dtype=torch.long, device=features.device - ).unsqueeze(0) - - if pruning_pars['tokenizer_padding_side'] == 'right': - - batch_size = features.shape[0] - image_tokens = [ - int(cur_image_token * image_token_ratio_list[cur_num]) - for cur_image_token in image_tokens_list - ] - keep_length = [ - int(cur_image_token * image_token_ratio_list[cur_num + 1]) - for cur_image_token in image_tokens_list - ] - - features_list = [] - attention_mask_list = [] - - if attention_mask is None: - attention_mask = torch.ones( - (batch_size, features.shape[1]), - dtype=torch.bool, - device=features.device, - ) - else: - attention_mask = attention_mask.bool() - - # obtain query_states and key_states to calculate attention map - hidden_states = features.clone().detach() - self_attn = module.self_attn - hidden_states = module.input_layernorm(hidden_states) - - num_heads = self_attn.num_heads - num_key_value_heads = self_attn.num_key_value_heads - head_dim = self_attn.head_dim - - bsz, q_len, _ = hidden_states.size() - - query_states = self_attn.q_proj(hidden_states) - key_states = self_attn.k_proj(hidden_states) - value_states = self_attn.v_proj(hidden_states) - - query_states = query_states.view( - bsz, q_len, num_heads, head_dim - ).transpose(1, 2) - key_states = key_states.view( - bsz, q_len, num_key_value_heads, head_dim - ).transpose(1, 2) - value_states = value_states.view( - bsz, q_len, num_key_value_heads, head_dim - ).transpose(1, 2) - - if position_embeddings is None: - cos, sin = self_attn.rotary_emb(value_states, position_ids) - else: - cos, sin = position_embeddings - - query_states, key_states = apply_rotary_pos_emb( - query_states, key_states, cos, sin - ) - - # attention_mask - eager_attention_mask = _prepare_4d_causal_attention_mask( - attention_mask, - (batch_size, q_len), - 
hidden_states, - past_key_values_length=0, - ).to(device=query_states.device) - - # take valid features - features = [ - cur_features[cur_attention_mask] - for cur_features, cur_attention_mask in zip( - features, attention_mask - ) - ] - attention_mask = [ - cur_attention_mask[cur_attention_mask] - for cur_attention_mask, cur_attention_mask in zip( - attention_mask, attention_mask - ) - ] - - # rank & drop - for i in range(batch_size): - image_index = image_token_posi[i] - if image_index == -1: - cur_input_embeds = features[i] - features_list.append(cur_input_embeds) - attention_mask_list.append(attention_mask[i]) - continue - - # obtain current states - cur_key_states = key_states[i] - cur_query_states = query_states[i] - cur_eager_attention_mask = eager_attention_mask[i] - - prompt_total_len = prompt_len[i] + image_tokens[i] - text_query_states = cur_query_states[ - :, prompt_total_len - 1, : - ].unsqueeze(1) - text_eager_attention_mask = cur_eager_attention_mask[ - :, prompt_total_len - 1, : - ].unsqueeze(1) - - # calculate attention map - attn_weights = torch.matmul( - text_query_states, cur_key_states.transpose(1, 2) - ) / math.sqrt( - head_dim - ) # (num_head, text_token,seq_len) - attn_weights = attn_weights + text_eager_attention_mask - attn_weights = nn.functional.softmax( - attn_weights, dim=-1, dtype=torch.float32 - ).to( - query_states.dtype - ) # (num_head, text_token,seq_len) - - attention_avg_head = torch.mean( - attn_weights, dim=0 - ) # ave across heads - attention_avg_head = attention_avg_head[ - :, image_index: image_index + image_tokens[i] - ] # select image token as keys - attention_avg_text = torch.mean(attention_avg_head, dim=0) # (576) - - # rank and drop by attention score - top_rank_index = attention_avg_text.topk(keep_length[i]).indices - top_rank_index = top_rank_index + image_index - top_rank_index = top_rank_index.sort().values - - start_index = image_index + image_tokens[i] - new_input_embeds = torch.cat( - [ - features[i][:image_index, :], - features[i][top_rank_index, :], - features[i][start_index:, :], - ], - dim=0, - ) - new_attention_mask = torch.cat( - [ - attention_mask[i][:image_index], - attention_mask[i][top_rank_index], - attention_mask[i][start_index:], - ], - dim=0, - ) - - features_list.append(new_input_embeds) - attention_mask_list.append(new_attention_mask) - - # Truncate sequences to max length as image embeddings can make the sequence longer - if self.model.__class__.__name__ == 'LlavaHf': - llama_model = self.model.vlm_model.language_model.model - elif self.model.__class__.__name__ == 'Llava': - llama_model = self.model.vlm_model.model - tokenizer_model_max_length = getattr( - llama_model.config, - 'tokenizer_model_max_length', - 2048, - ) - if tokenizer_model_max_length is not None: - new_input_embeds = [ - x[:tokenizer_model_max_length] for x in features_list - ] - new_attention_mask = [ - x[:tokenizer_model_max_length] for x in attention_mask_list - ] - - max_len = max(x.shape[0] for x in new_input_embeds) - - # padding the sequences to form batch - embeds_padded = [] - attention_mask_padded = [] - position_ids = torch.zeros( - (batch_size, max_len), - dtype=position_ids.dtype, - device=position_ids.device, - ) - for i, cur_new_embed in enumerate(new_input_embeds): - cur_len_emb = cur_new_embed.shape[0] - dif = max_len - cur_len_emb # padding to longest seq - - cur_new_embed = torch.cat( - [ - cur_new_embed, - torch.zeros( - (dif, cur_new_embed.shape[1]), - dtype=cur_new_embed.dtype, - device=cur_new_embed.device, - ), - ], - dim=0, - ) 
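# The ranking above scores every image token by the attention that the last prompt token pays to
# it (averaged over heads) and keeps the keep_length[i] highest-scoring ones. A simplified,
# self-contained sketch of that scoring (the causal-mask bias used above is omitted; names and
# shapes are assumptions):
import math
import torch

def rank_image_tokens(query, keys, img_start, img_len, keep):
    """query: [H, 1, d] last prompt-token query; keys: [H, S, d]. Returns kept absolute indices."""
    attn = query @ keys.transpose(1, 2) / math.sqrt(query.shape[-1])   # [H, 1, S]
    attn = attn.softmax(dim=-1).mean(dim=0).squeeze(0)                 # average heads -> [S]
    top = attn[img_start:img_start + img_len].topk(keep).indices + img_start
    return top.sort().values

H, S, d = 8, 640, 64
q, k = torch.randn(H, 1, d), torch.randn(H, S, d)
kept = rank_image_tokens(q, k, img_start=20, img_len=576, keep=288)
print(kept.shape)    # torch.Size([288])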
- cur_attention_mask = new_attention_mask[i] - cur_attention_mask = torch.cat( - [ - cur_attention_mask, - torch.full( - (dif,), - False, - dtype=cur_attention_mask.dtype, - device=cur_attention_mask.device, - ), - ], - dim=0, - ) - - embeds_padded.append(cur_new_embed) - - attention_mask_padded.append(cur_attention_mask) - - cur_len = new_attention_mask[i].sum().item() - position_ids[i, :cur_len] = torch.arange( - 0, cur_len, dtype=position_ids.dtype, device=position_ids.device - ) - - new_input_embeds = torch.stack(embeds_padded, dim=0) - new_input_embeds = new_input_embeds.to(features[0].dtype) - - new_attention_mask = torch.stack(attention_mask_padded, dim=0) - - if _position_ids is None: - position_ids = None - - if _attention_mask is None: - new_attention_mask = None - else: - new_attention_mask = new_attention_mask.to( - dtype=_attention_mask.dtype - ) - - kwargs['attention_mask'] = new_attention_mask - kwargs['position_ids'] = position_ids - kwargs['position_embeddings'] = None - pruning_pars['attention_mask'] = new_attention_mask - pruning_pars['position_ids'] = position_ids - pruning_pars['position_embeddings'] = None - - return (new_input_embeds,), kwargs - - @prefill_wrapper - def input_hook(module, input_args, pruning_pars): - - input_ids = input_args[0] - pre_prompt_length_list = [] - image_token_posi = [] - vision_tokens = [] - VISION_TOKEN_INDEX = pruning_pars['vision_token_index'] - - # find the position of the first image token - for seq in input_ids: - image_token_idxs = (seq == VISION_TOKEN_INDEX).nonzero(as_tuple=True)[0] - vision_tokens.append(pruning_pars['vision_token_length']) - image_token_posi.append(image_token_idxs[0].item()) - pre_prompt_length_list.append(seq.shape[0] - image_token_idxs.shape[0]) - - pruning_pars['prompt_len'] = pre_prompt_length_list - pruning_pars['image_token_posi'] = image_token_posi - pruning_pars['image_tokens'] = vision_tokens - - return input_args - - def input_hook_llava(fn, pruning_paras): - @wraps(fn) - def wrapper(self, *args, **kwargs): - if len(args) == 0: - return fn(*args, **kwargs) - input_args = args[0] - if hasattr(input_args[0], 'shape') and input_args[0].shape[0] == 1: - return fn(*args, **kwargs) - - input_ids = args[0] - attention_mask = args[2] - - image_token_posi = [] - prompt_len = [] - vision_tokens = [] - for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask): - seq = cur_input_ids[cur_attention_mask] - image_index = torch.where(seq == pruning_paras['IMAGE_TOKEN_INDEX'])[0].tolist() - if image_index == []: - image_token_posi.append(-1) - prompt_len.append(cur_input_ids.shape[0]) - else: - image_token_posi.append(image_index[0]) - prompt_len.append(cur_input_ids.shape[0] - 1) - vision_tokens.append(pruning_paras['vision_token_length']) - - pruning_paras['image_token_posi'] = image_token_posi - pruning_paras['prompt_len'] = prompt_len - pruning_paras['image_tokens'] = vision_tokens - - return fn(*args, **kwargs) - return wrapper - - @prefill_wrapper - def read_parameter_hook(module, args, kwargs, pruning_pars): - kwargs['attention_mask'] = pruning_pars['attention_mask'] - # kwargs['cache_position'] = pruning_pars['cache_position'] - kwargs['position_ids'] = pruning_pars['position_ids'] - kwargs['position_embeddings'] = pruning_pars['position_embeddings'] - - return args, kwargs - - if self.model.__class__.__name__ == 'LlavaHf': - self.model.embed_tokens.register_forward_pre_hook( - functools.partial(input_hook, pruning_pars=self.pruning_paras) - ) - elif self.model.__class__.__name__ == 'Llava': - 
hook_fn = input_hook_llava( - self.model.vlm_model.prepare_inputs_labels_for_multimodal, - self.pruning_paras - ) - self.model.vlm_model.prepare_inputs_labels_for_multimodal = MethodType( - hook_fn, self.model.vlm_model - ) - - for layer_idx in range(self.pruning_loc[0], len(self.blocks)): - if layer_idx in self.pruning_loc: - stage = self.pruning_loc.index(layer_idx) - self.blocks[layer_idx].register_forward_pre_hook( - functools.partial( - pruning_hook, - pruning_pars=self.pruning_paras, - cur_num=stage, - layer_idx=layer_idx, - ), - with_kwargs=True, - ) - else: - self.blocks[layer_idx].register_forward_pre_hook( - functools.partial( - read_parameter_hook, pruning_pars=self.pruning_paras - ), - with_kwargs=True, - ) diff --git a/llmc/compression/token_reduction/random.py b/llmc/compression/token_reduction/random.py deleted file mode 100644 index e889df78f..000000000 --- a/llmc/compression/token_reduction/random.py +++ /dev/null @@ -1,256 +0,0 @@ -import functools -from types import MethodType - -import torch - -from llmc.utils.registry_factory import TOKEN_REDUCTION_REGISTRY - -from .token_reduction_module import TokenReductionModule -from .utils import prefill_wrapper - - -@TOKEN_REDUCTION_REGISTRY.register('RandomPrune') -class RandomPrune(TokenReductionModule): - def __init__(self, config, model, blocks): - super().__init__(config, model, blocks) - self.add_sparse_config() - self.register_reduction_modules() - - def add_sparse_config(self): - - self.pruning_loc = self.special_config['pruning_loc'] - self.pruning_paras = self.special_config - - def register_reduction_modules(self): - - @prefill_wrapper - def random_pruning_hook(module, args, kwargs, pruning_paras): - - rate = pruning_paras['prune_ratio'] - image_token_start_index = pruning_paras['vision_token_start_index'] - image_token_length = pruning_paras['vision_token_length'] - - hidden_states = args[0] - causal_mask = kwargs['attention_mask'] - - device = hidden_states.device - vision_indexes = torch.arange( - image_token_start_index, - image_token_start_index + image_token_length, - device=device, - ) - if self.model.first_turn_question: - num_keep = round(image_token_length * (1 - rate)) - rand_idx = torch.randperm(image_token_length, device=device)[:num_keep] - vision_indexes = vision_indexes[rand_idx] - - # save rand_idx to module - module.register_buffer('rand_idx', rand_idx) - else: - # load vision_indexes from module (prompt cache) - rand_idx = module.rand_idx - vision_indexes = vision_indexes[rand_idx] - - # keep index - keep_indexs = torch.cat( - ( - torch.arange(image_token_start_index, device=device), - vision_indexes, - torch.arange( - image_token_start_index + image_token_length, - hidden_states.shape[1], - device=device, - ), - ) - ) - - keep_indexs = keep_indexs.sort().values - # filter hidden states & - hidden_states = hidden_states[:, keep_indexs, :] - # update position ids - position_ids = keep_indexs.unsqueeze(0) - # update attention mask - if causal_mask is not None: - causal_mask = causal_mask[ - :, :, : hidden_states.shape[1], : hidden_states.shape[1] - ] - kwargs['attention_mask'].resize_as_(causal_mask).copy_( - causal_mask.clone() - ) - kwargs['cache_position'].resize_as_(position_ids.squeeze(0)).copy_( - position_ids.squeeze(0).clone() - ) - kwargs['position_ids'].resize_as_(position_ids).copy_(position_ids.clone()) - - position_embeddings = kwargs['position_embeddings'] - new_pe0 = position_embeddings[0][:, keep_indexs, :].clone() - new_pe1 = position_embeddings[1][:, keep_indexs, :].clone() - 
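# random_pruning_hook above keeps a random subset of the vision tokens and every text token.
# A standalone sketch of the index bookkeeping it performs (names are assumptions):
import torch

def random_keep_indices(seq_len, vis_start, vis_len, prune_ratio, generator=None):
    """Return the sorted indices of the tokens that survive random vision-token pruning."""
    num_keep = round(vis_len * (1 - prune_ratio))
    rand_idx = torch.randperm(vis_len, generator=generator)[:num_keep] + vis_start
    keep = torch.cat((torch.arange(vis_start),                        # leading text tokens
                      rand_idx,                                       # surviving vision tokens
                      torch.arange(vis_start + vis_len, seq_len)))    # trailing text tokens
    return keep.sort().values

keep = random_keep_indices(seq_len=650, vis_start=35, vis_len=576, prune_ratio=0.75)
print(keep.shape)    # torch.Size([218]) = 35 + 144 + 39 surviving tokens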
position_embeddings[0].resize_as_(new_pe0).copy_(new_pe0) - position_embeddings[1].resize_as_(new_pe0).copy_(new_pe1) - - return (hidden_states,), kwargs - - @prefill_wrapper - def holitom_merge_hook(module, args, kwargs, pruning_paras): - - rate = pruning_paras['prune_ratio'] - image_token_start_index = pruning_paras['vision_token_start_index'] - image_token_length = pruning_paras['vision_token_length'] - - hidden_states = args[0] - causal_mask = kwargs['attention_mask'] - - device = hidden_states.device - last_layer_attention = pruning_paras['attn_scores'] - # compute average attention over different head - last_layer_attention_avg = torch.mean( - last_layer_attention, dim=1 - )[0] - # generate new attention mask based on the average attention, - # sample the top ATTENTION_RANK tokens with highest attention - last_layer_attention_avg_last_tok = ( - last_layer_attention_avg[-1] - ) - # get the attention in image token - last_layer_attention_avg_last_tok_image = \ - last_layer_attention_avg_last_tok[ - image_token_start_index: - image_token_start_index + image_token_length - ] - # get the indexes of the top ATTENTION_RANK tokens - top_attention_rank_index = ( - last_layer_attention_avg_last_tok_image.topk( - round( - image_token_length * (1 - rate) - ) - ).indices - + image_token_start_index - ) - - all_indices = torch.arange( - image_token_length, device=device - ) - non_topk_mask = ~torch.isin( - all_indices, - top_attention_rank_index - - image_token_start_index, - ) - non_topk_indices = ( - all_indices[non_topk_mask] - + image_token_start_index - ) - non_topk_states = hidden_states[ - :, non_topk_indices, : - ] # [batch_size, len(non_topk), hidden_size] - topk_states = hidden_states[ - :, top_attention_rank_index, : - ] # [batch_size, len(topk), hidden_size] - non_topk_norm = torch.norm( - non_topk_states, dim=-1, keepdim=True - ) # [batch_size, len(non_topk), 1] - topk_norm = torch.norm( - topk_states, dim=-1, keepdim=True - ) # [batch_size, len(topk), 1] - dot_product = torch.bmm( - non_topk_states, topk_states.transpose(1, 2) - ) # [batch_size, len(non_topk), len(topk)] - sim_matrix = dot_product / ( - non_topk_norm * topk_norm.transpose(1, 2) - ) - sim_max, sim_max_index = torch.max(sim_matrix, dim=-1) - - batch_size = hidden_states.size(0) - num_topk = len(top_attention_rank_index) - num_non_topk = len(non_topk_indices) - topk_counter = torch.ones((batch_size, num_topk, 1), device=hidden_states.device) - - for b in range(batch_size): - for i in range(num_non_topk): - topk_rel_idx = sim_max_index[b, i].item() # 这是 topk 中的相对索引 - topk_abs_idx = top_attention_rank_index[topk_rel_idx] # 得到绝对索引 - non_topk_abs_idx = non_topk_indices[i] - - # 累加non-topk到topk token上(就地) - hidden_states[b, topk_abs_idx, :] += hidden_states[b, non_topk_abs_idx, :] - # 增加计数 - topk_counter[b, topk_rel_idx] += 1 - - # 平均化所有topk token(包含自己和所有被合并的) - for b in range(batch_size): - for i in range(num_topk): - topk_abs_idx = top_attention_rank_index[i] - hidden_states[b, topk_abs_idx, :] /= topk_counter[b, i] - - keep_indexs = torch.cat( - ( - torch.arange( - image_token_start_index, - device=device, - ), - top_attention_rank_index, - torch.arange( - image_token_start_index - + image_token_length, - hidden_states.shape[1], - device=device, - ), - ) - ) - - # sort index - keep_indexs = keep_indexs.sort().values - # filter hidden states & - hidden_states = hidden_states[:, keep_indexs, :] - # update position ids - position_ids = keep_indexs.unsqueeze(0) - # update attention mask - if causal_mask is not None: - 
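# holitom_merge_hook above accumulates each dropped token onto its most similar kept token and
# divides by a per-token counter, i.e. a true mean over every merged group rather than a
# repeated pairwise average. The two Python loops can be vectorised with index_add_; a sketch
# of that accumulate-then-normalise step (single sample, names assumed):
import torch

def merge_mean(kept, dropped, nearest):
    """kept: [K, D]; dropped: [M, D]; nearest: [M] index of the kept token each dropped token joins."""
    acc = kept.clone()
    counts = torch.ones(kept.shape[0], 1)
    acc.index_add_(0, nearest, dropped)                              # sum dropped tokens onto targets
    counts.index_add_(0, nearest, torch.ones(dropped.shape[0], 1))   # how many tokens each group holds
    return acc / counts

kept, dropped = torch.randn(4, 8), torch.randn(6, 8)
nearest = torch.tensor([0, 0, 1, 3, 3, 3])
print(merge_mean(kept, dropped, nearest).shape)    # torch.Size([4, 8])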
causal_mask = causal_mask[:, :, :hidden_states.shape[1], :hidden_states.shape[1]] - kwargs['attention_mask'].resize_as_(causal_mask).copy_(causal_mask.clone()) - kwargs['cache_position'].resize_as_(position_ids.squeeze(0)).copy_( - position_ids.squeeze(0).clone()) - kwargs['position_ids'].resize_as_(position_ids).copy_(position_ids.clone()) - - position_embeddings = kwargs['position_embeddings'] - index_dim = 1 if position_embeddings[0].dim() == 3 else 2 - new_pe0 = position_embeddings[0].index_select(index_dim, keep_indexs).clone() - new_pe1 = position_embeddings[1].index_select(index_dim, keep_indexs).clone() - position_embeddings[0].resize_as_(new_pe0).copy_(new_pe0) - position_embeddings[1].resize_as_(new_pe0).copy_(new_pe1) - - return (hidden_states,), kwargs - - def update_output_attentions_hook(module, args, kwargs): - kwargs['output_attentions'] = True - return args, kwargs - - def store_attention_hook(m, x, layer_outputs, pruning_paras): - layer_attention = layer_outputs[1] - pruning_paras['attn_scores'] = layer_attention - - if self.special_config['vision_token_length'] is None: - if self.model.__class__.__name__ == 'Llava': - self.model.vlm_model.prepare_inputs_labels_for_multimodal = MethodType( - self.vtoken_length_for_llava_hook( - self.model.vlm_model.prepare_inputs_labels_for_multimodal, - self.pruning_paras - ), self.model.vlm_model - ) - - if self.special_config['metric'] == 'random': - self.blocks[self.pruning_loc].register_forward_pre_hook( - functools.partial(random_pruning_hook, pruning_paras=self.pruning_paras), - with_kwargs=True - ) - elif self.special_config['metric'] == 'holitom_merge': - self.blocks[self.pruning_loc - 1].register_forward_pre_hook( - update_output_attentions_hook, - with_kwargs=True - ) - self.blocks[self.pruning_loc - 1].register_forward_hook( - functools.partial(store_attention_hook, pruning_paras=self.pruning_paras), - ) - self.blocks[self.pruning_loc].register_forward_pre_hook( - functools.partial(holitom_merge_hook, pruning_paras=self.pruning_paras), - with_kwargs=True - ) diff --git a/llmc/compression/token_reduction/sparsevlm.py b/llmc/compression/token_reduction/sparsevlm.py deleted file mode 100755 index aae8f722b..000000000 --- a/llmc/compression/token_reduction/sparsevlm.py +++ /dev/null @@ -1,631 +0,0 @@ -import functools -import math -from functools import wraps -from types import MethodType - -import einops as ein -import torch - -from llmc.utils.registry_factory import TOKEN_REDUCTION_REGISTRY - -from .token_reduction_module import TokenReductionModule -from .utils import prefill_wrapper, prefill_wrapper_model - -layer_dict = {} -prune_flag = True -merge_flag = True -sparse_token_list_192 = [] -sparse_token_list_128 = [] -sparse_token_list_64 = [] -sparse_token_list_640 = [] -sparse_token_list_320 = [] -sparse_token_list_160 = [] -sparse_token_dict = {} - - -@TOKEN_REDUCTION_REGISTRY.register('SparseVLM') -class SparseVLM(TokenReductionModule): - def __init__(self, config, model, blocks): - super().__init__(config, model, blocks) - self.add_sparse_config() - self.register_reduction_modules() - - def add_sparse_config(self): - - self.pruning_loc = self.special_config.get('pruning_loc', [2, 6, 15]) - global layer_dict, prune_flag, merge_flag - layer_dict = {layer: idx for idx, layer in enumerate(self.pruning_loc)} - prune_flag = self.special_config.get('prune_flag', True) - merge_flag = self.special_config.get('merge_flag', True) - update_list() - self.pruning_paras = self.special_config - self.pruning_paras['pre_prompt_length_list'] 
= [] - - def register_reduction_modules(self): - @prefill_wrapper - def input_hook(module, args, pruning_paras): - input_ids = args[0] - pre_prompt_length_list = [] - - # find the position of the first image token - for seq in input_ids: - image_token_index = ( - seq == pruning_paras['vision_token_index'] - ).nonzero(as_tuple=True)[0] - if len(image_token_index) > 0: - pre_prompt_length_list.append(image_token_index[0].item()) - else: - pre_prompt_length_list.append(0) - pruning_paras['pre_prompt_length_list'] = pre_prompt_length_list - - def input_hook_llava(fn, pruning_paras, llava_next=False): - @wraps(fn) - def wrapper(self, *args, **kwargs): - if args[0].shape[1] == 1: - return fn(*args, **kwargs) - - input_ids = args[0] - attention_mask = args[2] - - if attention_mask is None: - attention_mask = torch.ones_like(input_ids, dtype=torch.bool) - else: - attention_mask = attention_mask.bool() - - pre_prompt_length_list = [] - for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask): - seq = cur_input_ids[cur_attention_mask] - image_token_index = ( - [-1] - + torch.where(seq == pruning_paras['vision_token_index'])[0].tolist() - + [seq.shape[0]] - ) - pre_prompt_length_list.append(image_token_index[1]) - - pruning_paras['pre_prompt_length_list'] = pre_prompt_length_list - - outs = fn(*args, **kwargs) - if llava_next: - pruning_paras['vision_token_length'] = outs[-1] - return outs - return wrapper - - @prefill_wrapper_model - def register_module_paras(module, args, kwargs, pruning_paras): - pre_prompt_length_list = pruning_paras['pre_prompt_length_list'] - hidden_states = kwargs['inputs_embeds'] - if hidden_states is None: - hidden_states = module.embed_tokens(kwargs['input_ids']) - - B, L, _ = hidden_states.shape - pruning_paras['B'] = B - - v_token_start = pre_prompt_length_list[0] if len( - pre_prompt_length_list) != 0 else 0 - text_token_start = v_token_start + pruning_paras['vision_token_length'] - pruning_paras['v_token_start'] = v_token_start # 35 - pruning_paras['text_token_start'] = text_token_start # 611 - pruning_paras['v_token_num'] = pruning_paras['vision_token_length'] # 576 - pruning_paras['retained_tokens'] = round( - pruning_paras['vision_token_length'] * (1 - pruning_paras['reduction_ratio']) - ) - - if (len(pre_prompt_length_list) != 0 and hidden_states.shape[1] != 1): - v_t = hidden_states[:, v_token_start: text_token_start, :] - t_t = hidden_states[:, text_token_start:, :] - m_v_t = v_t @ t_t.transpose(1, 2) - m_v_t = m_v_t.softmax(2).mean(1) - pruning_paras['t_token_idx'] = torch.where(m_v_t > m_v_t.mean()) - - return args, kwargs - - def update_output_attentions_hook(module, args, kwargs, pruning_paras, layer_idx): - kwargs['output_attentions'] = True - if layer_idx != self.pruning_loc[0]: - kwargs['position_ids'] = pruning_paras['position_ids'] - kwargs['attention_mask'] = pruning_paras['attention_mask'] - kwargs['cache_position'] = pruning_paras['cache_position'] - kwargs['position_embeddings'] = pruning_paras['position_embeddings'] - return args, kwargs - - def update_kwargs_hook(module, args, kwargs, pruning_paras, layer_idx): - - if len(kwargs['position_ids'][0]) == 1: - return args, kwargs - if layer_idx != self.pruning_loc[0]: - kwargs['position_ids'] = pruning_paras['position_ids'] - kwargs['attention_mask'] = pruning_paras['attention_mask'] - kwargs['cache_position'] = pruning_paras['cache_position'] - kwargs['position_embeddings'] = pruning_paras['position_embeddings'] - else: - pruning_paras['position_ids'] = kwargs['position_ids'] - 
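# register_module_paras above marks the text tokens most related to the vision span: it softmaxes
# the vision-to-text affinity matrix over the text axis, averages over the vision tokens, and keeps
# the text positions whose score exceeds the mean. A standalone sketch (names and shapes assumed):
import torch

def relevant_text_tokens(hidden, v_start, v_len):
    """hidden: [B, L, D]. Returns (batch_idx, text_idx) of the selected text tokens."""
    vis = hidden[:, v_start:v_start + v_len, :]
    txt = hidden[:, v_start + v_len:, :]
    affinity = (vis @ txt.transpose(1, 2)).softmax(dim=2).mean(dim=1)   # [B, n_text]
    return torch.where(affinity > affinity.mean())

hidden = torch.randn(1, 650, 32)
batch_idx, text_idx = relevant_text_tokens(hidden, v_start=35, v_len=576)
print(text_idx.shape)    # indices (relative to the text segment) of the selected text tokens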
pruning_paras['attention_mask'] = kwargs['attention_mask'] - pruning_paras['cache_position'] = kwargs['cache_position'] - pruning_paras['position_embeddings'] = kwargs['position_embeddings'] - return args, kwargs - - def get_attn_logits_hook(module, args, kwargs, layer_outs, pruning_paras, layer_idx): - - if len(kwargs['position_ids'][0]) == 1: - return layer_outs - - from transformers.models.llama.modeling_llama import \ - apply_rotary_pos_emb - - hidden_states = kwargs['hidden_states'] - position_embeddings = kwargs['position_embeddings'] - position_ids = kwargs['position_ids'] - past_key_value = layer_outs[2] - attention_mask = kwargs['attention_mask'] - - t_token_idx = pruning_paras['t_token_idx'] - v_token_start = pruning_paras['v_token_start'] - v_token_num = pruning_paras['v_token_num'] - - bsz, q_len, _ = hidden_states.size() - query_states = module.q_proj(hidden_states) - key_states = module.k_proj(hidden_states) - value_states = module.v_proj(hidden_states) - query_states = query_states.view( - bsz, q_len, module.num_heads, module.head_dim - ).transpose(1, 2) - key_states = key_states.view( - bsz, q_len, module.num_key_value_heads, module.head_dim - ).transpose(1, 2) - value_states = value_states.view( - bsz, q_len, module.num_key_value_heads, module.head_dim - ).transpose(1, 2) - - if position_embeddings is None: - cos, sin = module.rotary_emb(value_states, position_ids) - else: - cos, sin = position_embeddings - - query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) - if past_key_value is not None: - key_states = past_key_value.key_cache[layer_idx] - value_states = past_key_value.value_cache[layer_idx] - t_token_idx = t_token_idx[1] + v_token_start + v_token_num - L, S = query_states.size(-2), key_states.size(-2) - scale_factor = 1 / math.sqrt(query_states.size(-1)) - attn_bias = torch.zeros(L, S, dtype=query_states.dtype) - if module.is_causal: - assert attention_mask is None - temp_mask = torch.ones(L, S, dtype=torch.bool).tril(diagonal=0) - attn_bias.masked_fill_(temp_mask.logical_not(), float('-inf')) - attn_bias.to(query_states.dtype) - - attn_logits = query_states @ key_states.transpose(2, 3) * scale_factor - attn_logits += attn_bias.to(query_states.device) - attn_logits = torch.softmax(attn_logits, dim=-1) - - pruning_paras['attn_logits'] = attn_logits - - return layer_outs - - @prefill_wrapper - def decoder_attn_hook(module, inputs, kwargs, layer_outputs, pruning_paras, layer_idx): - - if 'attn_logits' not in pruning_paras: - attn_logits = layer_outputs[1] # for LlavaHf, but error - else: - attn_logits = pruning_paras['attn_logits'] - prune_flag = pruning_paras.get('prune_flag', True) - merge_flag = pruning_paras['merge_flag'] - v_token_start = pruning_paras['v_token_start'] - v_token_num = pruning_paras['v_token_num'] - text_token_start = pruning_paras['text_token_start'] - t_token_idx = pruning_paras['t_token_idx'] - retained_tokens = pruning_paras['retained_tokens'] - - B = pruning_paras['B'] - pre_prompt_length_list = pruning_paras['pre_prompt_length_list'] - vision_token_length = pruning_paras['vision_token_length'] - - attention_mask = kwargs['attention_mask'] - position_embeddings = kwargs['position_embeddings'] - - hidden_states = inputs[0] # [B, L, D] - pred_score_vis, s_flag, relation_vis_text = attn_postprocess_topk( - attn_logits, - v_token_start, - v_token_num, - text_token_start, - t_token_idx, - layer_idx, - retained_tokens, - pruning_paras['reduction_ratio'] - ) - if not prune_flag: - pred_score_vis = 
torch.zeros_like(relation_vis_text, dtype=bool) - policy = torch.ones(B, hidden_states.shape[1], dtype=hidden_states.dtype, - device=hidden_states.device) - policy[:, v_token_start:text_token_start] = \ - pred_score_vis.type(dtype=hidden_states.dtype) - - for batch in range(len(pre_prompt_length_list)): - # keep pre prompt - prompt_length = pre_prompt_length_list[batch] - policy[batch, :prompt_length] = 1 - # keep question - text_token_start = prompt_length + vision_token_length - policy[batch, text_token_start:] = 1 - - if self.model.first_turn_question: - vision_mask = policy[:, v_token_start:v_token_start + v_token_num] - module.register_buffer('vision_mask', vision_mask) - else: - vision_mask = module.vision_mask - policy[:, v_token_start:v_token_start + v_token_num] = vision_mask - - total_sparse_token_idx = torch.where(policy == 0)[1].unsqueeze(0) - - # merge and cluster - if s_flag and merge_flag and total_sparse_token_idx.shape[1] > 0: - total_sparse_token = batch_index_select( - layer_outputs[0], total_sparse_token_idx - ) - - merge_token_idx_stage1 = torch.where(pred_score_vis == 0)[1] - merge_token_stage1 = relation_vis_text[0][merge_token_idx_stage1] - if prune_flag: - merge_token_num_stage1 = int(merge_token_idx_stage1.shape[0] * 0.3) + 1 - else: - merge_token_num_stage1 = ( - merge_token_idx_stage1.shape[0] - - sparse_token_dict[retained_tokens][layer_dict[layer_idx]] - ) - merge_token_stage2_idx = merge_token_stage1.topk(merge_token_num_stage1)[1] - if not prune_flag: - all_idx = torch.arange( - merge_token_stage1.size(0), - device=merge_token_stage1.device - ) - non_topk_idx = all_idx[~torch.isin(all_idx, merge_token_stage2_idx)] - pred_score_vis[0][non_topk_idx] = 1 - policy[:, v_token_start:text_token_start] = \ - pred_score_vis.type(dtype=hidden_states.dtype) - - merge_token_stage2 = total_sparse_token[:, merge_token_stage2_idx, :] - cluster_num = int(merge_token_stage2.shape[1] / 10) + 1 - if cluster_num == 0: - cluster_num = merge_token_stage2.shape[1] - merge_sparse_token, index_down = cluster_and_merge(merge_token_stage2, cluster_num) - - cluster_idx = total_sparse_token_idx.squeeze(0)[merge_token_stage2_idx[index_down]] - cluster_idx = cluster_idx.squeeze(0) - select_token_idx = torch.where(policy == 1)[1].unsqueeze(0) - select_token = batch_index_select(layer_outputs[0], select_token_idx) - select_vis_token_num = pred_score_vis.sum() - keep_indexs = torch.cat( - ( - select_token_idx.squeeze(0)[:v_token_start + select_vis_token_num], - cluster_idx, - select_token_idx.squeeze(0)[v_token_start + select_vis_token_num:] - ) - ) - select_and_merge_token = torch.cat( - ( - select_token[:, :v_token_start + select_vis_token_num, :], - merge_sparse_token, - select_token[:, v_token_start + select_vis_token_num:, :] - ), - dim=1 - ) - layer_outputs = (select_and_merge_token, layer_outputs[1]) - v_token_num = pred_score_vis.sum() + cluster_num - - else: - keep_indexs = torch.where(policy == 1)[1] - select_token_idx = keep_indexs.unsqueeze(0) - layer_outputs = (batch_index_select(layer_outputs[0], select_token_idx), - layer_outputs[1]) - v_token_num = pred_score_vis.sum() - - text_token_start = v_token_start + v_token_num - position_ids = keep_indexs.unsqueeze(0) - new_output = layer_outputs - cache_position = position_ids.squeeze(0) - - if attention_mask is not None: - attention_mask = attention_mask[:, :, keep_indexs, keep_indexs] - new_pe0 = position_embeddings[0][:, keep_indexs, :].clone() - new_pe1 = position_embeddings[1][:, keep_indexs, :].clone() - position_embeddings = 
(new_pe0, new_pe1) - - pruning_paras['v_token_num'] = v_token_num - pruning_paras['text_token_start'] = text_token_start - - pruning_paras['position_ids'] = position_ids - pruning_paras['cache_position'] = cache_position - pruning_paras['position_embeddings'] = position_embeddings - pruning_paras['attention_mask'] = attention_mask - - return new_output - - @prefill_wrapper - def read_parameter_hook(module, args, kwargs, pruning_paras): - kwargs['position_ids'] = pruning_paras['position_ids'] - kwargs['attention_mask'] = pruning_paras['attention_mask'] - kwargs['cache_position'] = pruning_paras['cache_position'] - kwargs['position_embeddings'] = pruning_paras['position_embeddings'] - - return args, kwargs - - if self.model.__class__.__name__ == 'LlavaHf': - self.model.embed_tokens.register_forward_pre_hook( - functools.partial(input_hook, pruning_paras=self.pruning_paras) - ) - elif self.model.__class__.__name__ == 'Llava': - self.model.vlm_model.prepare_inputs_labels_for_multimodal = MethodType( - input_hook_llava( - self.model.vlm_model.prepare_inputs_labels_for_multimodal, - self.pruning_paras, - llava_next=self.special_config['vision_token_length'] is None - ), self.model.vlm_model - ) - - if self.model.__class__.__name__ == 'LlavaHf': - llama_model = self.model.model - elif self.model.__class__.__name__ == 'Llava': - llama_model = self.model.model.model - llama_model.register_forward_pre_hook( - functools.partial(register_module_paras, pruning_paras=self.pruning_paras), - with_kwargs=True - ) - - sorted_pruning_locs = sorted(self.pruning_loc) - total_layers = len(self.blocks) - - for block_idx in range(sorted_pruning_locs[0], total_layers): - if block_idx in sorted_pruning_locs: - if self.model.__class__.__name__ == 'LlavaHf': - self.blocks[block_idx].register_forward_pre_hook( - functools.partial( - update_output_attentions_hook, - pruning_paras=self.pruning_paras, - layer_idx=block_idx, - ), - with_kwargs=True - ) - elif self.model.__class__.__name__ == 'Llava': - self.blocks[block_idx].register_forward_pre_hook( - functools.partial( - update_kwargs_hook, - pruning_paras=self.pruning_paras, - layer_idx=block_idx, - ), - with_kwargs=True - ) - self.blocks[block_idx].self_attn.register_forward_hook( - functools.partial( - get_attn_logits_hook, - pruning_paras=self.pruning_paras, - layer_idx=block_idx, - ), - with_kwargs=True - ) - self.blocks[block_idx].register_forward_hook( - functools.partial( - decoder_attn_hook, - pruning_paras=self.pruning_paras, - layer_idx=block_idx - ), - with_kwargs=True - ) - else: - self.blocks[block_idx].register_forward_pre_hook( - functools.partial( - read_parameter_hook, - pruning_paras=self.pruning_paras - ), - with_kwargs=True - ) - - -def update_list(): - global sparse_token_list_192, sparse_token_list_128, sparse_token_list_64 - global sparse_token_list_640, sparse_token_list_320, sparse_token_list_160 - global prune_flag, merge_flag, sparse_token_dict - - if layer_dict == {2: 0, 6: 1, 15: 2}: # 2*576 4*300 10*200 16*110 - sparse_token_list_192 = [300, 200, 110] - sparse_token_list_128 = [303, 110, 36] - sparse_token_list_64 = [66, 30, 17] - prune_flag, merge_flag = True, True - elif prune_flag and merge_flag: - sparse_token_list_192 = [180] - sparse_token_list_128 = [114] - sparse_token_list_64 = [48] - sparse_token_list_640 = [0.1979] - sparse_token_list_320 = [0.0833] - sparse_token_list_160 = [0.0261] - elif prune_flag: - sparse_token_list_192 = [192] - sparse_token_list_128 = [128] - sparse_token_list_64 = [64] - sparse_token_list_640 = 
[0.2222] - sparse_token_list_320 = [0.1111] - sparse_token_list_160 = [0.0555] - elif merge_flag: - sparse_token_list_192 = [149] - sparse_token_list_128 = [78] - sparse_token_list_64 = [7] - else: - raise RuntimeError( - 'Both prune_flag and merge_flag are False — sparseVLM is inactive.' - ) - - sparse_token_dict = { - 192: sparse_token_list_192, - 128: sparse_token_list_128, - 64: sparse_token_list_64, - 640: sparse_token_list_640, - 320: sparse_token_list_320, - 160: sparse_token_list_160 - } - - -def attn_postprocess_topk( - self_attn_weights, - v_token_start, - v_token_num, - text_token_start, - t_token_idx, - layer_idx, - retained_tokens, - reduction_ratio): - ''' - self_attn_weights: [B, H, L, L] - ''' - self_attn_weights = self_attn_weights.mean(1) # B, L[Q], L[K] - - t_token_idx = t_token_idx[1] + text_token_start - - relation_vis_text = self_attn_weights[:, t_token_idx, - v_token_start: v_token_start + v_token_num] # B, L2, L1 - - relation_vis_text = relation_vis_text.mean(1) # B, L1 - - relation_vis = relation_vis_text - s_flag = True # s_flag controls whether token merge is needed. - if retained_tokens in [192, 128, 64]: - sparse_token_list = sparse_token_dict[retained_tokens] - else: - sparse_token_list = sparse_token_dict[round((1 - reduction_ratio) * 2880)] - retained_tokens_prune = sparse_token_list[layer_dict[layer_idx]] - if retained_tokens_prune < 1: - retained_tokens_prune = round(retained_tokens_prune * v_token_num) - if v_token_num != 0: - mask = torch.zeros_like(relation_vis, dtype=bool) - _, indices = torch.topk(relation_vis, min( - retained_tokens_prune, v_token_num - 1), dim=1) - mask[0][indices] = 1 - else: - mask = torch.ones_like(relation_vis_text, dtype=bool) - s_flag = False - return mask, s_flag, relation_vis_text - - -def batch_index_select(x, idx): - if len(x.size()) == 4: - B, H, N, C = x.size() - N_new = idx.size(1) - offset = torch.arange(B, dtype=torch.long, - device=x.device).view(B, 1) * N - idx = idx + offset - out = x.reshape(B * N, H, C)[idx.reshape(-1)].reshape(B, H, N_new, C) - return out - elif len(x.size()) == 3: - # in this condition - B, N, C = x.size() - N_new = idx.size(1) - offset = torch.arange(B, dtype=torch.long, - device=x.device).view(B, 1) * N - idx = idx + offset - out = x.reshape(B * N, C)[idx.reshape(-1)].reshape(B, N_new, C) - return out - elif len(x.size()) == 2: - B, N = x.size() - N_new = idx.size(1) - offset = torch.arange(B, dtype=torch.long, - device=x.device).view(B, 1) * N - idx = idx + offset - out = x.reshape(B * N)[idx.reshape(-1)].reshape(B, N_new) - return out - else: - raise NotImplementedError - - -def index_points(points, idx): - """Sample features following the index. 
- Returns: - new_points:, indexed points data, [B, S, C] - - Args: - points: input points data, [B, N, C] - idx: sample index data, [B, S] - """ - device = points.device - B = points.shape[0] - view_shape = list(idx.shape) - view_shape[1:] = [1] * (len(view_shape) - 1) - repeat_shape = list(idx.shape) - repeat_shape[0] = 1 - batch_indices = torch.arange(B, dtype=torch.long).to( - device).view(view_shape).repeat(repeat_shape) - new_points = points[batch_indices, idx, :] - return new_points - - -def cluster_and_merge(x, cluster_num): - - B, N, C = x.shape - - x1 = ein.rearrange(x, 'b l r -> b l () r') - x2 = ein.rearrange(x, 'b l r -> b () l r') - distance = (x1 - x2).norm(dim=-1, p=2) - dist_matrix = distance / (C ** 0.5) - # get local density - dist_nearest, index_nearest = torch.topk( - dist_matrix, k=cluster_num, dim=-1, largest=False) - density = (-(dist_nearest ** 2).mean(dim=-1)).exp() - # add a little noise to ensure no tokens have the same density. - density = density + torch.rand( - density.shape, device=density.device, dtype=density.dtype) * 1e-6 - - # get distance indicator - mask = density[:, None, :] > density[:, :, None] - mask = mask.type(x.dtype) - dist_max = dist_matrix.flatten(1).max(dim=-1)[0][:, None, None] - dist, _ = (dist_matrix * mask + - dist_max * (1 - mask)).min(dim=-1) - - # select clustering center according to score - score = dist * density - _, index_down = torch.topk(score, k=cluster_num, dim=-1) - - # assign tokens to the nearest center - dist_matrix = index_points(dist_matrix, index_down) - - idx_cluster = dist_matrix.argmin(dim=1) - - # make sure cluster center merge to itself - idx_batch = torch.arange(B, device=x.device)[ - :, None].expand(B, cluster_num) - idx_tmp = torch.arange(cluster_num, device=x.device)[ - None, :].expand(B, cluster_num) - idx_cluster[idx_batch.reshape(-1), - index_down.reshape(-1)] = idx_tmp.reshape(-1) - - # merge tokens - - B, N, C = x.shape - # device = dist_matrix.device - # idx_token = torch.arange(N)[None, :].repeat(B, 1).to(device) - # agg_weight = x.new_ones(B, N, 1) - - token_weight = x.new_ones(B, N, 1) - # self_attn_weights = self_attn_weights.mean(1) - # token_weight = self_attn_weights.sum(dim=1).exp().unsqueeze(2) - # B_weight,N_weigh,C_weight = token_weight.shape - # token_weight = token_weight.reshape(B_weight*N_weigh, C_weight) - # [sparse_token_idx.reshape(-1)].reshape(B, N, 1) - - idx_batch = torch.arange(B, device=x.device)[:, None] - idx = idx_cluster + idx_batch * cluster_num - - all_weight = token_weight.new_zeros(B * cluster_num, 1) - all_weight.index_add_(dim=0, index=idx.reshape(B * N), - source=token_weight.reshape(B * N, 1)) - all_weight = all_weight + 1e-6 - norm_weight = token_weight / all_weight[idx] - - # average token features - x_merged = x.new_zeros(B * cluster_num, C) - source = x * norm_weight - x_merged.index_add_(dim=0, index=idx.reshape(B * N), - source=source.reshape(B * N, C).type(x.dtype)) - x_merged = x_merged.reshape(B, cluster_num, C) - - return x_merged, index_down diff --git a/llmc/compression/token_reduction/token_reduction_module.py b/llmc/compression/token_reduction/token_reduction_module.py deleted file mode 100644 index bb3a1c5dc..000000000 --- a/llmc/compression/token_reduction/token_reduction_module.py +++ /dev/null @@ -1,52 +0,0 @@ - -from functools import wraps - - -class TokenReductionModule: - def __init__(self, config, model, blocks): - self.config = config - self.model = model - self.blocks = blocks - self.set_sparse_config() - - def set_sparse_config(self): - 
self.special_config = self.config.get('special', {}) - self.special_config['is_video_model'] = self.model.pruning_config['is_video_model'] - # vision_token can be image or video - if self.special_config['is_video_model']: - self.special_config['vision_token_index'] = self.model.pruning_config[ - 'video_token_index' - ] - self.special_config['vision_token_length'] = self.model.pruning_config[ - 'video_token_length' - ] - else: - self.special_config['vision_token_index'] = self.model.pruning_config.get( - 'image_token_index', None - ) - self.special_config['vision_token_start_index'] = self.model.pruning_config.get( - 'vision_token_start_index', None - ) - self.special_config['vision_token_length'] = self.model.pruning_config.get( - 'image_token_length', None - ) - - def register_reduction_modules(self): - pass - - def vtoken_length_for_llava_hook(self, fn, pruning_paras): - @wraps(fn) - def wrapper(self, *args, **kwargs): - if args[0].shape[1] == 1: - return fn(*args, **kwargs) - - message = ( - 'To obtain the vision_token_length for LLaVA-1.6, you should append ' - '`image_features[0].shape[0]` to the return value of the function ' - '`prepare_inputs_labels_for_multimodal`, and modify the related code accordingly.' - ) - outs = fn(*args, **kwargs) - assert len(outs) == 7, message - pruning_paras['vision_token_length'] = outs[-1] - return outs - return wrapper diff --git a/llmc/compression/token_reduction/tome.py b/llmc/compression/token_reduction/tome.py deleted file mode 100644 index 1bd660e0a..000000000 --- a/llmc/compression/token_reduction/tome.py +++ /dev/null @@ -1,258 +0,0 @@ -import math -import types -from typing import Callable, Optional, Tuple - -import torch -from transformers.models.clip.modeling_clip import CLIPEncoderLayer - -from llmc.utils.registry_factory import TOKEN_REDUCTION_REGISTRY - -from .token_reduction_module import TokenReductionModule - - -@TOKEN_REDUCTION_REGISTRY.register('ToMe') -class ToMe(TokenReductionModule): - def __init__(self, config, model, blocks): - super().__init__(config, model, blocks) - self.add_sparse_config() - self.patch_layer() - - def add_sparse_config(self): - r_param = self.special_config.get('r', 0) - if isinstance(r_param, int) or isinstance(r_param, float): - self.r = [max(int(r_param), 0)] * len(self.blocks) - elif isinstance(r_param, (tuple, list)): - if len(r_param) == 2: - start_r, step_r = r_param - self.r = [max(int(start_r + i * step_r), 0) for i in range(len(self.blocks))] - else: - self.r = [0] * len(self.blocks) - for i, val in enumerate(r_param): - if i < len(self.blocks): - self.r[i] = max(int(val), 0) - else: - raise ValueError('Invalid r format. Expected int or (start, step) tuple.') - - self.pruning_paras = self.special_config - - def patch_layer(self): - for idx, block in enumerate(self.blocks): - if self.r[idx] > 0: - block.r = self.r[idx] - if isinstance(block, CLIPEncoderLayer): # llava - block.self_attn.forward = types.MethodType( - tome_CLIPSdpaAttention_forward, - block.self_attn - ) - block.forward = types.MethodType( - tome_CLIPEncoderLayer_forward, - block - ) - - -def do_nothing(x, mode=None): - return x - - -def bipartite_soft_matching( - metric: torch.Tensor, - r: int, - class_token: bool = False, - distill_token: bool = False, -) -> Tuple[Callable, Callable]: - """Applies ToMe with a balanced matching set (50%, 50%). - - Input size is [batch, tokens, channels]. - r indicates the number of tokens to remove (max 50% of tokens). - - Extra args: - - class_token: Whether or not there's a class token. 
- - distill_token: Whether or not there's also a distillation token. - - When enabled, the class token and distillation tokens won't get merged. - """ - protected = 0 - if class_token: - protected += 1 - if distill_token: - protected += 1 - - # We can only reduce by a maximum of 50% tokens - t = metric.shape[1] - r = min(r, (t - protected) // 2) - - if r <= 0: - return do_nothing, do_nothing - - with torch.no_grad(): - metric = metric / metric.norm(dim=-1, keepdim=True) - a, b = metric[..., ::2, :], metric[..., 1::2, :] - scores = a @ b.transpose(-1, -2) - - if class_token: - scores[..., 0, :] = -math.inf - if distill_token: - scores[..., :, 0] = -math.inf - - node_max, node_idx = scores.max(dim=-1) - edge_idx = node_max.argsort(dim=-1, descending=True)[..., None] - - unm_idx = edge_idx[..., r:, :] # Unmerged Tokens - src_idx = edge_idx[..., :r, :] # Merged Tokens - dst_idx = node_idx[..., None].gather(dim=-2, index=src_idx) - - if class_token: - # Sort to ensure the class token is at the start - unm_idx = unm_idx.sort(dim=1)[0] - - def merge(x: torch.Tensor, mode='mean') -> torch.Tensor: - src, dst = x[..., ::2, :], x[..., 1::2, :] - n, t1, c = src.shape - unm = src.gather(dim=-2, index=unm_idx.expand(n, t1 - r, c)) - src = src.gather(dim=-2, index=src_idx.expand(n, r, c)) - dst = dst.scatter_reduce(-2, dst_idx.expand(n, r, c), src, reduce=mode) - - if distill_token: - return torch.cat([unm[:, :1], dst[:, :1], unm[:, 1:], dst[:, 1:]], dim=1) - else: - return torch.cat([unm, dst], dim=1) - - def unmerge(x: torch.Tensor) -> torch.Tensor: - unm_len = unm_idx.shape[1] - unm, dst = x[..., :unm_len, :], x[..., unm_len:, :] - n, _, c = unm.shape - - src = dst.gather(dim=-2, index=dst_idx.expand(n, r, c)) - - out = torch.zeros(n, metric.shape[1], c, device=x.device, dtype=x.dtype) - - out[..., 1::2, :] = dst - out.scatter_(dim=-2, index=(2 * unm_idx).expand(n, unm_len, c), src=unm) - out.scatter_(dim=-2, index=(2 * src_idx).expand(n, r, c), src=src) - - return out - - return merge, unmerge - - -def merge_wavg( - merge: Callable, x: torch.Tensor, size: torch.Tensor = None -) -> Tuple[torch.Tensor, torch.Tensor]: - """Applies the merge function by taking a weighted average based on token - size. - - Returns the merged tensor and the new token sizes. 
- """ - if size is None: - size = torch.ones_like(x[..., 0, None]) - - x = merge(x * size, mode='sum') - size = merge(size, mode='sum') - - x = x / size - return x, size - - -def tome_CLIPSdpaAttention_forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, - causal_attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = False, -) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: - - from packaging import version - parsed_torch_version_base = version.parse(version.parse(torch.__version__).base_version) - is_torch_greater_or_equal_than_2_2 = parsed_torch_version_base >= version.parse('2.2') - - if output_attentions: - return super().forward( - hidden_states=hidden_states, - attention_mask=attention_mask, - causal_attention_mask=causal_attention_mask, - output_attentions=output_attentions, - ) - - # CLIP text model uses both `causal_attention_mask` and `attention_mask` - if attention_mask is not None and causal_attention_mask is not None: - attn_mask = attention_mask + causal_attention_mask - elif causal_attention_mask is not None: - attn_mask = causal_attention_mask - else: - attn_mask = attention_mask - - bsz, tgt_len, embed_dim = hidden_states.size() - - query_states = self.q_proj(hidden_states) - key_states = self.k_proj(hidden_states) - value_states = self.v_proj(hidden_states) - - query_states = query_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) - key_states = key_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) - value_states = value_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) - - if all([ - not is_torch_greater_or_equal_than_2_2, - query_states.device.type == 'cuda', - attn_mask is not None - ]): - query_states = query_states.contiguous() - key_states = key_states.contiguous() - value_states = value_states.contiguous() - - # CLIP text model uses both `causal_attention_mask` and `attention_mask` sequentially. 
- attn_output = torch.nn.functional.scaled_dot_product_attention( - query_states, - key_states, - value_states, - attn_mask=attn_mask, - dropout_p=self.dropout if self.training else 0.0, - scale=self.scale, - ) - - attn_output = attn_output.transpose(1, 2) - attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) - - attn_output = self.out_proj(attn_output) - - return attn_output, None, key_states.mean(1) - - -def tome_CLIPEncoderLayer_forward( - self, - hidden_states: torch.Tensor, - attention_mask: torch.Tensor, - causal_attention_mask: torch.Tensor, - output_attentions: Optional[bool] = False, -) -> Tuple[torch.FloatTensor]: - - residual = hidden_states - - hidden_states = self.layer_norm1(hidden_states) - hidden_states, attn_weights, key_mean = self.self_attn( - hidden_states=hidden_states, - attention_mask=attention_mask, - causal_attention_mask=causal_attention_mask, - output_attentions=output_attentions, - ) - hidden_states = residual + hidden_states - - # ToMe - merge, _ = bipartite_soft_matching( - key_mean, - self.r, - True - ) - hidden_states, _ = merge_wavg(merge, hidden_states) - - residual = hidden_states - hidden_states = self.layer_norm2(hidden_states) - hidden_states = self.mlp(hidden_states) - hidden_states = residual + hidden_states - - outputs = (hidden_states,) - - if output_attentions: - outputs += (attn_weights,) - - return outputs diff --git a/llmc/compression/token_reduction/utils.py b/llmc/compression/token_reduction/utils.py deleted file mode 100755 index 100dd5674..000000000 --- a/llmc/compression/token_reduction/utils.py +++ /dev/null @@ -1,559 +0,0 @@ -import ast -import re -from functools import wraps -from typing import List, Tuple, Union - -import torch -from loguru import logger -from transformers.models.clip.modeling_clip import CLIPEncoderLayer - -try: - from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX -except ImportError: - pass -import random - - -def prefill_wrapper(func): - @wraps(func) - def wrapper(*args, **kwargs): - # for the decoding stage - if len(args) > 1: - input_args = args[1] - if hasattr(input_args[0], 'shape') and input_args[0].shape[1] == 1: - return None - return func(*args, **kwargs) - return wrapper - - -def prefill_wrapper_model(func): - @wraps(func) - def wrapper(*args, **kwargs): - # for the decoding stage - if len(args) > 1: - input_args = args[2]['inputs_embeds'] - if hasattr(input_args, 'shape') and input_args.shape[1] == 1: - return None - return func(*args, **kwargs) - return wrapper - - -def parse_r(num_layers: int, r: Union[List[int], Tuple[int, float], int]) -> List[int]: - """Copy from the TOME. https://github.com/facebookresearch/ToMe. - - Process a constant r or r schedule into a list for use internally. - - r can take the following forms: - - int: A constant number of tokens per layer. - - Tuple[int, float]: A pair of r, inflection. - Inflection describes there the the reduction / layer should trend - upward (+1), downward (-1), or stay constant (0). A value of (r, 0) - is as providing a constant r. (r, -1) is what we describe in the paper - as "decreasing schedule". Any value between -1 and +1 is accepted. - - List[int]: A specific number of tokens per layer. For extreme granularity. 
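# Hand-computed examples (illustrative) of the r schedules this parser produces,
# derived from its definition; parse_r is assumed to be in scope:
assert parse_r(4, 2) == [2, 2, 2, 2]            # constant r per layer
assert parse_r(5, (4, -1)) == [8, 6, 4, 2, 0]   # "decreasing schedule", mean r = 4
assert parse_r(3, [1, 2]) == [1, 2, 0]          # explicit list, zero-padded to num_layers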
- """ - inflect = 0 - if isinstance(r, list): - if len(r) < num_layers: - r = r + [0] * (num_layers - len(r)) - return list(r) - elif isinstance(r, tuple): - r, inflect = r - - min_val = int(r * (1.0 - inflect)) - max_val = 2 * r - min_val - step = (max_val - min_val) / (num_layers - 1) - - return [int(min_val + step * i) for i in range(num_layers)] - - -def make_tome_class(transformer_class): - class VisionZipTransformer(transformer_class): - """ - Modifications: - - Initialize r - """ - def forward(self, *args, **kwargs) -> torch.Tensor: - self._info['r'] = parse_r(len(self.vision_model.encoder.layers), self.r) - # self._info["r"] = self.r - return super().forward(*args, **kwargs) - - return VisionZipTransformer - - -def apply_info(model, dominant_num, contextual_num): - - VisionZipTransformer = make_tome_class(model.__class__) - - model.__class__ = VisionZipTransformer - model.r = [0 for i in range(22)] + [1] + [0] - - model._info = { - 'r': [model.r], - 'dominant': dominant_num, - 'contextual': contextual_num, - } - for module in model.modules(): - if isinstance(module, CLIPEncoderLayer): - module.self_attn.k_proj._info = model._info - - -def add_post_hook_to_get_2dPool(model, post_hook_fn, pruning_paras): - original_fn = model.get_2dPool - - def wrapped_fn(*args, **kwargs): - result = original_fn(*args, **kwargs) - return post_hook_fn(result, pruning_paras) - - model.get_2dPool = wrapped_fn - - -def select_best_resolution(original_size, possible_resolutions): - - original_width, original_height = original_size - best_fit = None - max_effective_resolution = 0 - min_wasted_resolution = float('inf') - - for width, height in possible_resolutions: - # Calculate the downscaled size to keep the aspect ratio - scale = min(width / original_width, height / original_height) - downscaled_width = int(original_width * scale) - downscaled_height = int(original_height * scale) - - # Calculate effective and wasted resolutions - effective_resolution = min( - downscaled_width * downscaled_height, - original_width * original_height - ) - wasted_resolution = (width * height) - effective_resolution - - if (effective_resolution > max_effective_resolution) or ( - effective_resolution == max_effective_resolution and - wasted_resolution < min_wasted_resolution - ): - max_effective_resolution = effective_resolution - min_wasted_resolution = wasted_resolution - best_fit = (width, height) - - return best_fit - - -def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size): - """Calculate the shape of the image patch grid after the preprocessing for - images of any resolution. - - Args: - image_size (tuple): The size of the input image in the format (width, height). - grid_pinpoints (str): A string representation of a list of possible resolutions. - patch_size (int): The size of each image patch. - - Returns: - tuple: The shape of the image patch grid in the format (width, height). 
- """ - if isinstance(grid_pinpoints, str) and 'x' in grid_pinpoints: - assert patch_size in [224, 336, 384, 448, 512], ( - 'patch_size should be in [224, 336, 384, 448, 512]' - ) - # Use regex to extract the range from the input string - matches = re.findall(r'\((\d+)x(\d+)\)', grid_pinpoints) - range_start = tuple(map(int, matches[0])) - range_end = tuple(map(int, matches[-1])) - # Generate a matrix of tuples - # from (range_start[0], range_start[1]) to (range_end[0], range_end[1]) - grid_pinpoints = [ - (i, j) - for i in range(range_start[0], range_end[0] + 1) - for j in range(range_start[1], range_end[1] + 1) - ] - # Multiply all elements by patch_size - grid_pinpoints = [[dim * patch_size for dim in pair] for pair in grid_pinpoints] - if type(grid_pinpoints) is list: - possible_resolutions = grid_pinpoints - else: - possible_resolutions = ast.literal_eval(grid_pinpoints) - width, height = select_best_resolution(image_size, possible_resolutions) - return width // patch_size, height // patch_size - - -def unpad_image(tensor, original_size): - """Unpads a PyTorch tensor of a padded and resized image. - - Args: - tensor (torch.Tensor): The image tensor, assumed to be in CxHxW format. - original_size (tuple): The original size of the image (height, width). - - Returns: - torch.Tensor: The unpadded image tensor. - """ - original_width, original_height = original_size - current_height, current_width = tensor.shape[1:] - - # Compute aspect ratios - original_aspect_ratio = original_width / original_height - current_aspect_ratio = current_width / current_height - - # Determine padding size and direction - if original_aspect_ratio > current_aspect_ratio: - # Padding was added to the height - scale_factor = current_width / original_width - new_height = int(original_height * scale_factor) - padding = (current_height - new_height) // 2 - unpadded_tensor = tensor[:, padding: current_height - padding, :] - else: - # Padding was added to the width - scale_factor = current_height / original_height - new_width = int(original_width * scale_factor) - padding = (current_width - new_width) // 2 - unpadded_tensor = tensor[:, :, padding: current_width - padding] - - return unpadded_tensor - - -def prepare_inputs_labels_for_multimodal_with_index_masks( - self, input_ids, position_ids, attention_mask, past_key_values, labels, - images, modalities=['image'], image_sizes=None -): - vision_tower = self.get_vision_tower() - if vision_tower is None or images is None or input_ids.shape[1] == 1: - return input_ids, position_ids, attention_mask, past_key_values, None, labels, None - - if isinstance(modalities, str): - modalities = [modalities] - - if type(images) is list or images.ndim == 5: - if type(images) is list: - images = [x.unsqueeze(0) if x.ndim == 3 else x for x in images] - - video_idx_in_batch = [] - for _ in range(len(modalities)): - if modalities[_] == 'video': - video_idx_in_batch.append(_) - - images_list = [] - for image in images: - if image.ndim == 4: - images_list.append(image) - else: - images_list.append(image.unsqueeze(0)) - - concat_images = torch.cat([image for image in images_list], dim=0) - split_sizes = [image.shape[0] for image in images_list] - encoded_image_features = self.encode_images(concat_images) - index_masks = vision_tower.index_masks - encoded_image_features = torch.split(encoded_image_features, split_sizes) - index_masks = torch.split(index_masks, split_sizes) - image_features = [] - for idx, image_feat in enumerate(encoded_image_features): - if idx in video_idx_in_batch: - 
image_features.append(self.get_2dPool(image_feat)) - else: - image_features.append(image_feat) - mm_patch_merge_type = getattr(self.config, 'mm_patch_merge_type', 'flat') - # mm_patch_merge_type = mm_patch_merge_type.replace('_unpad', '') - image_aspect_ratio = getattr(self.config, 'image_aspect_ratio', 'square') - - if mm_patch_merge_type == 'flat': - image_features = [x.flatten(0, 1) for x in image_features] - index_masks = [x.flatten(0, 1) for x in index_masks] - image_features = [x[m] for x, m in zip(image_features, index_masks)] - elif mm_patch_merge_type.startswith('spatial'): - new_image_features = [] - for image_idx, (image_feature, index_mask) in enumerate( - zip(image_features, index_masks) - ): - if image_idx in video_idx_in_batch: # video operations - raise NotImplementedError - elif image_feature.shape[0] > 1: - - base_image_feature, base_index_mask = image_feature[0], index_mask[0] - image_feature, index_mask = image_feature[1:], index_mask[1:] - height = width = self.get_vision_tower().num_patches_per_side - assert height * width == base_image_feature.shape[0] - - if image_aspect_ratio == 'anyres': - if hasattr(self.get_vision_tower(), 'image_size'): - vision_tower_image_size = self.get_vision_tower().image_size - else: - raise ValueError('vision_tower_image_size is not found.') - try: - num_patch_width, num_patch_height = get_anyres_image_grid_shape( - image_sizes[image_idx], - self.config.image_grid_pinpoints, - vision_tower_image_size - ) - except Exception: - num_patch_width, num_patch_height = 2, 2 - image_feature = image_feature.view( - num_patch_height, num_patch_width, height, width, -1 - ) - index_mask = index_mask.view( - num_patch_height, num_patch_width, height, width - ) - else: - raise NotImplementedError - - if 'maxpool2x2' in mm_patch_merge_type: - raise NotImplementedError - elif 'unpad' in mm_patch_merge_type and 'anyres_max' in image_aspect_ratio: - raise NotImplementedError - elif 'unpad' in mm_patch_merge_type: - image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous() - image_feature = image_feature.flatten(1, 2).flatten(2, 3) - image_feature = unpad_image(image_feature, image_sizes[image_idx]) - image_feature = torch.cat( - ( - image_feature, - self.model.image_newline[ - :, None, None - ].expand(*image_feature.shape[:-1], 1).to(image_feature.device) - ), dim=-1 - ) - image_feature = image_feature.flatten(1, 2).transpose(0, 1) - index_mask = index_mask.permute(0, 2, 1, 3).contiguous().unsqueeze(0) - index_mask = index_mask.flatten(1, 2).flatten(2, 3) - index_mask = unpad_image(index_mask, image_sizes[image_idx]) - index_mask = torch.cat(( - index_mask, - torch.ones( - *index_mask.shape[:-1], 1, dtype=torch.bool - ).to(index_mask.device) - ), dim=-1) - index_mask = index_mask.flatten(1, 2).squeeze(0) - image_feature = image_feature[index_mask] - else: - image_feature = image_feature.permute(0, 2, 1, 3, 4).contiguous() - image_feature = image_feature.flatten(0, 3) - index_mask = index_mask.permute(0, 2, 1, 3).contiguous() - index_mask = index_mask.flatten(0, 3) - image_feature = image_feature[index_mask] - if 'nobase' in mm_patch_merge_type: - pass - else: - base_image_feature = base_image_feature[base_index_mask] - image_feature = torch.cat((base_image_feature, image_feature), dim=0) - new_image_features.append(image_feature) - else: # single image operations - image_feature = image_feature[0] - index_mask = index_mask[0] - if 'unpad' in mm_patch_merge_type: - image_feature = torch.cat(( - image_feature, - 
self.model.image_newline[None].to(image_feature.device) - ), dim=0) - index_mask = torch.cat(( - index_mask, - torch.ones(1, dtype=torch.bool).to(index_mask.device) - ), dim=0) - image_feature = image_feature[index_mask] - new_image_features.append(image_feature) - image_features = new_image_features - else: - raise ValueError(f'Unexpected mm_patch_merge_type: {self.config.mm_patch_merge_type}') - else: - image_features = self.encode_images(images) - image_features = image_features[index_masks].unsqueeze(0) - vision_tower.index_masks = [] - vtoken_length = image_features[0].shape[0] - # TODO: image start / end is not implemented here to support pretraining. - if ( - getattr(self.config, 'tune_mm_mlp_adapter', False) and - getattr(self.config, 'mm_use_im_start_end', False) - ): - raise NotImplementedError - # rank_print(f"Total images : {len(image_features)}") - - # Let's just add dummy tensors if they do not exist, - # it is a headache to deal with None all the time. - # But it is not ideal, and if you have a better idea, - # please open an issue / submit a PR, thanks. - _labels = labels - _position_ids = position_ids - _attention_mask = attention_mask - if attention_mask is None: - attention_mask = torch.ones_like(input_ids, dtype=torch.bool) - else: - attention_mask = attention_mask.bool() - if position_ids is None: - position_ids = torch.arange( - 0, input_ids.shape[1], - dtype=torch.long, device=input_ids.device - ) - if labels is None: - labels = torch.full_like(input_ids, IGNORE_INDEX) - - # remove the padding using attention_mask -- FIXME - input_ids = [ - cur_input_ids[cur_attention_mask] - for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask) - ] - labels = [ - cur_labels[cur_attention_mask] - for cur_labels, cur_attention_mask in zip(labels, attention_mask) - ] - - new_input_embeds = [] - new_labels = [] - cur_image_idx = 0 - # rank_print("Inserting Images embedding") - for batch_idx, cur_input_ids in enumerate(input_ids): - num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum() - # rank0_print(num_images) - if num_images == 0: - cur_image_features = image_features[cur_image_idx] - cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids) - cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0]], dim=0) - new_input_embeds.append(cur_input_embeds) - new_labels.append(labels[batch_idx]) - cur_image_idx += 1 - continue - - image_token_indices = [-1] + \ - torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() + [cur_input_ids.shape[0]] - cur_input_ids_noim = [] - cur_labels = labels[batch_idx] - cur_labels_noim = [] - for i in range(len(image_token_indices) - 1): - cur_input_ids_noim.append( - cur_input_ids[image_token_indices[i] + 1: image_token_indices[i + 1]] - ) - cur_labels_noim.append( - cur_labels[image_token_indices[i] + 1: image_token_indices[i + 1]] - ) - split_sizes = [x.shape[0] for x in cur_labels_noim] - cur_input_embeds = self.get_model().embed_tokens(torch.cat(cur_input_ids_noim)) - cur_input_embeds_no_im = torch.split(cur_input_embeds, split_sizes, dim=0) - cur_new_input_embeds = [] - cur_new_labels = [] - - for i in range(num_images + 1): - cur_new_input_embeds.append(cur_input_embeds_no_im[i]) - cur_new_labels.append(cur_labels_noim[i]) - if i < num_images: - try: - cur_image_features = image_features[cur_image_idx] - except IndexError: - cur_image_features = image_features[cur_image_idx - 1] - cur_image_idx += 1 - cur_new_input_embeds.append(cur_image_features) - cur_new_labels.append( - torch.full( - 
(cur_image_features.shape[0],), - IGNORE_INDEX, - device=cur_labels.device, dtype=cur_labels.dtype - ) - ) - - cur_new_input_embeds = [x.to(self.device) for x in cur_new_input_embeds] - - cur_new_input_embeds = torch.cat(cur_new_input_embeds) - cur_new_labels = torch.cat(cur_new_labels) - - new_input_embeds.append(cur_new_input_embeds) - new_labels.append(cur_new_labels) - - # Truncate sequences to max length as image embeddings can make the sequence longer - tokenizer_model_max_length = getattr(self.config, 'tokenizer_model_max_length', None) - # rank_print("Finishing Inserting") - - new_input_embeds = [ - x[:tokenizer_model_max_length] - for x, modality in zip(new_input_embeds, modalities) - ] - new_labels = [ - x[:tokenizer_model_max_length] - for x, modality in zip(new_labels, modalities) - ] - - # Combine them - max_len = max(x.shape[0] for x in new_input_embeds) - batch_size = len(new_input_embeds) - - new_input_embeds_padded = [] - new_labels_padded = torch.full( - (batch_size, max_len), - IGNORE_INDEX, - dtype=new_labels[0].dtype, - device=new_labels[0].device - ) - attention_mask = torch.zeros( - (batch_size, max_len), - dtype=attention_mask.dtype, - device=attention_mask.device - ) - position_ids = torch.zeros( - (batch_size, max_len), - dtype=position_ids.dtype, device=position_ids.device - ) - # rank0_print("Prepare pos id") - - for i, (cur_new_embed, cur_new_labels) in enumerate(zip(new_input_embeds, new_labels)): - cur_len = cur_new_embed.shape[0] - if getattr(self.config, 'tokenizer_padding_side', 'right') == 'left': - new_input_embeds_padded.append( - torch.cat( - ( - torch.zeros( - (max_len - cur_len, cur_new_embed.shape[1]), - dtype=cur_new_embed.dtype, device=cur_new_embed.device - ), - cur_new_embed - ), dim=0 - ) - ) - if cur_len > 0: - new_labels_padded[i, -cur_len:] = cur_new_labels - attention_mask[i, -cur_len:] = True - position_ids[i, -cur_len:] = torch.arange( - 0, cur_len, - dtype=position_ids.dtype, device=position_ids.device - ) - else: - new_input_embeds_padded.append( - torch.cat( - ( - cur_new_embed, - torch.zeros( - (max_len - cur_len, cur_new_embed.shape[1]), - dtype=cur_new_embed.dtype, device=cur_new_embed.device - ) - ), dim=0 - ) - ) - if cur_len > 0: - new_labels_padded[i, :cur_len] = cur_new_labels - attention_mask[i, :cur_len] = True - position_ids[i, :cur_len] = torch.arange( - 0, cur_len, - dtype=position_ids.dtype, device=position_ids.device - ) - - new_input_embeds = torch.stack(new_input_embeds_padded, dim=0) - # rank0_print("tokenizer padding") - - if _labels is None: - new_labels = None - else: - new_labels = new_labels_padded - - if _attention_mask is None: - attention_mask = None - else: - attention_mask = attention_mask.to(dtype=_attention_mask.dtype) - - if _position_ids is None: - position_ids = None - if getattr(self.config, 'use_pos_skipping', False) and self.training: - position_ids = torch.arange( - new_input_embeds.size(1), - device=new_input_embeds.device - ).unsqueeze(0).to(new_input_embeds.device) - split_position = random.randint(0, new_input_embeds.size(1)) - left_add = random.randint(0, self.config.pos_skipping_range) - right_add = random.randint(left_add, self.config.pos_skipping_range) - position_ids[:, :split_position] += left_add - position_ids[:, split_position:] += right_add - # rank0_print("Finish preparing") - # print(vtoken_length) - return None, position_ids, attention_mask, past_key_values, \ - new_input_embeds, new_labels, vtoken_length diff --git a/llmc/compression/token_reduction/visionzip.py 
b/llmc/compression/token_reduction/visionzip.py deleted file mode 100755 index 97988e9e0..000000000 --- a/llmc/compression/token_reduction/visionzip.py +++ /dev/null @@ -1,638 +0,0 @@ -import functools -import math -from functools import wraps -from types import MethodType -from typing import Any, List, Optional, Tuple, Union - -import torch -import torch.nn as nn -from transformers.models.llava.modeling_llava import \ - LlavaCausalLMOutputWithPast - -from llmc.utils.registry_factory import TOKEN_REDUCTION_REGISTRY - -from .token_reduction_module import TokenReductionModule -from .utils import (apply_info, prefill_wrapper, - prepare_inputs_labels_for_multimodal_with_index_masks) - - -def visionzip_forward( - self, - input_ids: torch.LongTensor = None, - pixel_values: torch.FloatTensor = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_values: Optional[List[torch.FloatTensor]] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - vision_feature_layer: Optional[int] = None, - vision_feature_select_strategy: Optional[str] = None, - labels: Optional[torch.LongTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - cache_position: Optional[torch.LongTensor] = None, - num_logits_to_keep: int = 0, -) -> Union[Tuple, LlavaCausalLMOutputWithPast]: - - output_attentions = ( - output_attentions - if output_attentions is not None - else self.config.output_attentions - ) - output_hidden_states = ( - output_hidden_states - if output_hidden_states is not None - else self.config.output_hidden_states - ) - return_dict = ( - return_dict if return_dict is not None else self.config.use_return_dict - ) - vision_feature_layer = ( - vision_feature_layer - if vision_feature_layer is not None - else self.config.vision_feature_layer - ) - vision_feature_select_strategy = ( - vision_feature_select_strategy - if vision_feature_select_strategy is not None - else self.config.vision_feature_select_strategy - ) - - if (input_ids is None) ^ (inputs_embeds is not None): - raise ValueError( - 'You cannot specify both input_ids and ' - 'inputs_embeds at the same time, and must specify either one' - ) - - if pixel_values is not None and inputs_embeds is not None: - raise ValueError( - 'You cannot specify both pixel_values and ' - 'inputs_embeds at the same time, and must specify either one' - ) - - legacy_processing = False - if inputs_embeds is None: - inputs_embeds = self.get_input_embeddings()(input_ids) - - legacy_processing = ( - (input_ids == self.config.image_token_index).sum(1).max() - < self.config.image_seq_length - ) or (input_ids.shape[-1] == 1 and pixel_values is not None) - - if pixel_values is not None: - image_outputs = self.vision_tower(pixel_values, output_hidden_states=True) - # this is not memory efficient at all - # (output_hidden_states=True) will save all the hidden stated. 
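# Minimal sketch (hypothetical inputs; the checkpoint name is only an assumed
# example of the usual LLaVA-1.5 vision tower) of the feature selection performed
# just below: with output_hidden_states=True the tower returns every layer's
# hidden states, the vision feature layer (typically -2) is indexed out, and the
# CLS token is dropped under the 'default' select strategy.
import torch
from transformers import CLIPVisionModel

vision_tower = CLIPVisionModel.from_pretrained('openai/clip-vit-large-patch14-336')
pixel_values = torch.randn(1, 3, 336, 336)
with torch.no_grad():
    outs = vision_tower(pixel_values, output_hidden_states=True)
features = outs.hidden_states[-2][:, 1:]   # drop CLS -> [1, 576, 1024]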
- selected_image_feature = image_outputs.hidden_states[vision_feature_layer] - if vision_feature_select_strategy == 'default': - selected_image_feature = selected_image_feature[:, 1:] - elif vision_feature_select_strategy == 'full': - selected_image_feature = selected_image_feature - else: - raise ValueError( - f'Unexpected select \ - feature strategy: {self.config.vision_feature_select_strategy}' - ) - - image_features = self.multi_modal_projector(selected_image_feature) - - image_token_idxs = (input_ids == self.config.image_token_index).nonzero( - as_tuple=True - ) - image_start_idx, image_end_idx = image_token_idxs[1][0], image_token_idxs[1][-1] - image_token_num = image_features.shape[1] - input_ids = torch.cat( - [ - input_ids[:, :image_start_idx], - input_ids[:, image_start_idx: image_start_idx + image_token_num], - input_ids[:, image_end_idx + 1:], - ], - dim=1, - ) - inputs_embeds = torch.cat( - [ - inputs_embeds[:, :image_start_idx], - inputs_embeds[:, image_start_idx: image_start_idx + image_token_num], - inputs_embeds[:, image_end_idx + 1:], - ], - dim=1, - ) - token_num = input_ids.shape[1] - attention_mask = attention_mask[:, :token_num] - position_ids = position_ids[:, :token_num] - cache_position = cache_position[:token_num] - - if legacy_processing: - # prefill stage vs decoding stage (legacy behavior copied) - if input_ids.shape[1] != 1: - inputs_embeds, attention_mask, labels, position_ids = ( - self._merge_input_ids_with_image_features( - image_features, inputs_embeds, input_ids, attention_mask, labels - ) - ) - cache_position = torch.arange( - attention_mask.shape[1], device=attention_mask.device - ) - else: - # Retrieve the first layer to inspect the logits and mask out the hidden states - # that are set to 0 - first_layer_past_key_value = past_key_values[0][0][:, :, :, 0] - - batch_index, non_attended_tokens = torch.where( - first_layer_past_key_value.float().sum(-2) == 0 - ) - - # Get the target length - target_length = input_ids.shape[1] - past_length = first_layer_past_key_value.shape[-1] - - extended_attention_mask = torch.ones( - (attention_mask.shape[0], past_length), - dtype=attention_mask.dtype, - device=attention_mask.device, - ) - - # Filter out only the tokens that can be un-attended, this can happen - # if one uses Llava + Fused modules where the cache on the - # first iteration is already big enough, or if one passes custom cache - valid_indices = non_attended_tokens < extended_attention_mask.size(-1) - new_batch_index = batch_index[valid_indices] - new_non_attended_tokens = non_attended_tokens[valid_indices] - - # Zero-out the places where we don't need to attend - extended_attention_mask[new_batch_index, new_non_attended_tokens] = 0 - - attention_mask = torch.cat( - (extended_attention_mask, attention_mask[:, -target_length:]), dim=1 - ) - position_ids = torch.sum(attention_mask, dim=1).unsqueeze(-1) - 1 - cache_position = torch.arange( - attention_mask.shape[1], device=attention_mask.device - )[-target_length:] - - # TODO: @raushan retain only the new behavior after v4.47 - else: - special_image_mask = ( - (input_ids == self.config.image_token_index) - .unsqueeze(-1) - .expand_as(inputs_embeds) - ) - image_features = image_features.to( - inputs_embeds.device, inputs_embeds.dtype - ) - inputs_embeds = inputs_embeds.masked_scatter( - special_image_mask, image_features - ) - - outputs = self.language_model( - attention_mask=attention_mask, - position_ids=position_ids, - past_key_values=past_key_values, - inputs_embeds=inputs_embeds, - use_cache=use_cache, 
- output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - cache_position=cache_position, - num_logits_to_keep=num_logits_to_keep, - ) - - logits = outputs[0] - - loss = None - if labels is not None: - # Shift so that tokens < n predict n - if attention_mask is not None: - shift_attention_mask = attention_mask[..., 1:] - shift_logits = logits[..., :-1, :][ - shift_attention_mask.to(logits.device) != 0 - ].contiguous() - shift_labels = labels[..., 1:][ - shift_attention_mask.to(labels.device) != 0 - ].contiguous() - else: - shift_logits = logits[..., :-1, :].contiguous() - shift_labels = labels[..., 1:].contiguous() - # Flatten the tokens - loss_fct = nn.CrossEntropyLoss() - loss = loss_fct( - shift_logits.view(-1, shift_logits.size(-1)), - shift_labels.view(-1).to(shift_logits.device), - ) - - if not return_dict: - output = (logits,) + outputs[1:] - return (loss,) + output if loss is not None else output - - return LlavaCausalLMOutputWithPast( - loss=loss, - logits=logits, - past_key_values=outputs.past_key_values, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - image_hidden_states=image_features if pixel_values is not None else None, - ) - - -def Qwen2_5_VLVisionAttention_forward( - self, - hidden_states: torch.Tensor, - pruning_paras, - cu_seqlens: torch.Tensor, - rotary_pos_emb: Optional[torch.Tensor] = None, - position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, -) -> torch.Tensor: - from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import \ - apply_rotary_pos_emb_vision - head_dim = self.qkv.in_features // self.num_heads - seq_length = hidden_states.shape[0] - q, k, v = self.qkv(hidden_states).reshape( - seq_length, 3, self.num_heads, -1 - ).permute(1, 0, 2, 3).unbind(0) - if position_embeddings is None: - emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1) - cos = emb.cos() - sin = emb.sin() - else: - cos, sin = position_embeddings - q, k = apply_rotary_pos_emb_vision(q, k, cos, sin) - - attention_mask = torch.full( - [1, seq_length, seq_length], torch.finfo(q.dtype).min, device=q.device, dtype=q.dtype - ) - for i in range(1, len(cu_seqlens)): - attention_mask[..., cu_seqlens[i - 1]: cu_seqlens[i], cu_seqlens[i - 1]: cu_seqlens[i]] = 0 - - q = q.transpose(0, 1) - k = k.transpose(0, 1) - v = v.transpose(0, 1) - attn_weights = torch.matmul(q, k.transpose(1, 2)) / math.sqrt(head_dim) - attn_weights = attn_weights + attention_mask - attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(q.dtype) - attn_output = torch.matmul(attn_weights, v) - attn_output = attn_output.transpose(0, 1) - attn_output = attn_output.reshape(seq_length, -1) - attn_output = self.proj(attn_output) - pruning_paras['attn_logits'] = attn_weights - pruning_paras['attn_key'] = k - return attn_output - - -@TOKEN_REDUCTION_REGISTRY.register('VisionZip') -class VisionZip(TokenReductionModule): - def __init__(self, config, model, blocks): - super().__init__(config, model, blocks) - self.add_sparse_config() - self.register_reduction_modules() - - def add_sparse_config(self): - self.dominant = self.special_config['dominant'] - self.contextual = self.special_config['contextual'] - - self.pruning_paras = self.special_config - prune_only = self.special_config.get('prune_only', False) - merge_only = self.special_config.get('merge_only', False) - assert not (prune_only and merge_only), 'prune_only and merge_only cannot both be True' - self.pruning_paras['prune_only'] = prune_only - 
self.pruning_paras['merge_only'] = merge_only - - def register_reduction_modules(self): - - def visionzip_hook(m, images, image_forward_outs, pruning_paras, llava_next): - attn_weights = image_forward_outs.attentions[-2] - hidden_states = image_forward_outs.hidden_states[-2] - metric = self.blocks[-2].self_attn.k_proj.metric - dominant_num = m._info['dominant'] - contextual_num = m._info['contextual'] - - # Dominant Visual Tokens - cls_idx = 0 - cls_attention = attn_weights[:, :, cls_idx, cls_idx + 1:] - cls_attention_sum = cls_attention.sum(dim=1) - topk_indices = cls_attention_sum.topk(dominant_num, dim=1).indices + 1 - if pruning_paras['merge_only']: - all_indices = torch.zeros( - (hidden_states.shape[0], 1), - dtype=topk_indices.dtype, device=topk_indices.device - ) - dominant_num = 0 - else: - all_indices = torch.cat( - [ - torch.zeros( - (hidden_states.shape[0], 1), - dtype=topk_indices.dtype, device=topk_indices.device, - ), - topk_indices, - ], dim=1, - ) - - mask = torch.ones_like( - hidden_states[:, :, 0], dtype=torch.bool, device=metric.device - ).scatter_(1, all_indices, False) - - if self.model.first_turn_question: - m.register_buffer('mask', mask) - else: - mask = m.mask - - dominant_tokens = hidden_states.masked_select(~mask.unsqueeze(-1)).view( - hidden_states.shape[0], dominant_num + 1, hidden_states.shape[2] - ) - - # Filter - metric_filtered = metric[mask].view( - hidden_states.shape[0], - hidden_states.shape[1] - (dominant_num + 1), - metric.shape[2], - ) - - hidden_states_filtered = hidden_states.masked_select( - mask.unsqueeze(-1) - ).view( - hidden_states.shape[0], - hidden_states.shape[1] - (dominant_num + 1), - hidden_states.shape[2], - ) - - metric_normalized = metric_filtered / metric_filtered.norm( - dim=-1, keepdim=True - ) - - # Contextual Visual Tokens - step = max(1, metric_normalized.shape[1] // contextual_num) - target_indices = torch.arange( - 0, metric_normalized.shape[1], step, device=metric_normalized.device - )[:contextual_num] - - # keep_idxs - index_masks = ~mask - if not pruning_paras['prune_only']: - pruned_indices = mask.nonzero(as_tuple=False)[:, 1].view(hidden_states.shape[0], -1) - target_index = pruned_indices[:, target_indices] - index_masks.scatter_(1, target_index, True) - pruning_paras['index_masks'] = index_masks[:, 1:] - - target_tokens = metric_normalized[:, target_indices, :] - - tokens_to_merge = metric_normalized[ - :, - ~torch.isin( - torch.arange( - metric_normalized.shape[1], device=metric_normalized.device - ), - target_indices, - ), - :, - ] - similarity = torch.bmm(tokens_to_merge, target_tokens.transpose(1, 2)) - assign_one_hot = torch.zeros( - tokens_to_merge.shape[0], - tokens_to_merge.shape[1], - contextual_num, - dtype=hidden_states_filtered.dtype, - device=metric_normalized.device, - ) - assign_one_hot.scatter_(2, similarity.argmax(dim=2).unsqueeze(-1), 1) - counts = assign_one_hot.sum(dim=1).clamp(min=1).unsqueeze(-1) - hidden_to_merge = hidden_states_filtered[ - :, - ~torch.isin( - torch.arange( - hidden_states_filtered.shape[1], - device=hidden_states_filtered.device, - ), - target_indices, - ), - :, - ] - aggregated_hidden = ( - torch.bmm(assign_one_hot.transpose(1, 2), hidden_to_merge) / counts - ) - target_hidden = hidden_states_filtered[:, target_indices, :] - - contextual_tokens = target_hidden + aggregated_hidden - - # Merge with target hidden states and concatenate - hidden_states_save = torch.cat( - [dominant_tokens, contextual_tokens], dim=1 - ).to(images[0].dtype) - - res = 
list(image_forward_outs.hidden_states) - if not llava_next: - if pruning_paras['prune_only']: - res[-2] = dominant_tokens.contiguous().to(images[0].dtype) - else: - res[-2] = hidden_states_save.contiguous() - image_forward_outs.hidden_states = tuple(res) - - return image_forward_outs - - def store_key_hook(m, x, outputs): - bsz = x[0].shape[0] - raw_outputs = ( - outputs.view(bsz, -1, m.num_heads, m.head_dim) - .transpose(1, 2) - .contiguous() - ) - m.metric = raw_outputs.clone().mean(1) - - # output_attentions - def update_output_attentions_hook(module, args, kwargs): - kwargs['output_attentions'] = True - return args, kwargs - - def update_index_masks_hook(module, inps, outs, pruning_paras): - module.index_masks = pruning_paras['index_masks'] - - if self.model.__class__.__name__ == 'LlavaHf': - vision_tower = self.model.vlm_model.vision_tower - elif self.model.__class__.__name__ == 'Llava': - vision_tower = self.model.vision_model.vision_tower - - if self.model.__class__.__name__ in ('LlavaHf', 'Llava'): - apply_info( - vision_tower, - dominant_num=self.dominant, - contextual_num=self.contextual, - ) - - if self.model.__class__.__name__ == 'LlavaHf': - self.model.vlm_model.__class__.forward = visionzip_forward - if self.model.__class__.__name__ in ('LlavaHf', 'Llava'): - vision_tower.register_forward_pre_hook( - update_output_attentions_hook, with_kwargs=True - ) - - r = vision_tower.r - for idx, block in enumerate(self.blocks): - if r[idx]: - block.self_attn.k_proj.num_heads = block.self_attn.num_heads - block.self_attn.k_proj.head_dim = block.self_attn.head_dim - block.self_attn.k_proj.register_forward_hook(store_key_hook) - - vision_tower.register_forward_hook( - functools.partial( - visionzip_hook, - pruning_paras=self.pruning_paras, - llava_next=self.special_config['vision_token_length'] is None - ) - ) - - # llava_next - if self.special_config['vision_token_length'] is None: - - self.model.vlm_model.prepare_inputs_labels_for_multimodal = MethodType( - prepare_inputs_labels_for_multimodal_with_index_masks, - self.model.vlm_model - ) - - self.model.vision_model.register_forward_hook( - functools.partial(update_index_masks_hook, pruning_paras=self.pruning_paras), - ) - - def get_metric(fn, pruning_paras): - @wraps(fn) - def wrapper(self, *args, **kwargs): - return fn(self, *args, pruning_paras=pruning_paras, **kwargs) - return wrapper - - def merger_hook(module, inputs, kwargs, layer_outs, pruning_paras): - with torch.no_grad(): - attn_mean = pruning_paras['attn_logits'].mean(dim=0) # 16 1120, 1120 -> 1120, 1120 - attn_key = pruning_paras['attn_key'] - - window_index, _ = module.get_window_index(kwargs['grid_thw']) - reverse_indices = torch.argsort(window_index) - - attn_mean = attn_mean.sum(dim=0) - attn_mean = attn_mean.view(attn_mean.shape[0] // 4, -1).mean(dim=-1) - attn_mean = attn_mean[reverse_indices] - - attn_key = attn_key.view( - attn_key.shape[0], attn_key.shape[1] // 4, - 4, attn_key.shape[-1] - ).mean(dim=2) - attn_key = attn_key[:, reverse_indices, :].mean(dim=0).unsqueeze(0) - - pruning_paras['attn_logits'] = attn_mean - pruning_paras['attn_key'] = attn_key - return layer_outs - - @prefill_wrapper - def get_input_ids_hook(module, input_args, pruning_paras): - pruning_paras['input_ids'] = input_args[0] - return input_args - - def prune_qwenv25vl_hook(module, args, kwargs, pruning_paras): - if kwargs['position_ids'].shape[-1] == 1: - return args, kwargs - attn_logits = pruning_paras['attn_logits'] - attn_key = pruning_paras['attn_key'] - inputs_embeds = 
kwargs['inputs_embeds'] - position_ids = kwargs['position_ids'] - attention_mask = kwargs['attention_mask'] - - dominant_num = int(self.dominant * attn_logits.size(0)) - contextual_num = max(int(self.contextual * attn_logits.size(0)), 1) - topk_values, topk_indices = torch.topk(attn_logits, dominant_num) - - mask = torch.zeros_like(attn_logits, dtype=torch.bool) - mask[topk_indices] = True - contextual_mask = ~mask - metric_filtered = attn_key[:, contextual_mask] - metric_normalized = metric_filtered / metric_filtered.norm(dim=-1, keepdim=True) - del attn_key, metric_filtered - - # Contextual Visual Tokens - step = max(1, metric_normalized.shape[1] // contextual_num) - target_indices = torch.arange( - 0, metric_normalized.shape[1], step, - device=metric_normalized.device - )[:contextual_num] - target_tokens = metric_normalized[:, target_indices, :] - - tokens_to_merge = metric_normalized[ - :, - ~torch.isin( - torch.arange( - metric_normalized.shape[1], - device=metric_normalized.device - ), target_indices - ), - : - ] - similarity = torch.bmm(tokens_to_merge, target_tokens.transpose(1, 2)) - assign_one_hot = torch.zeros( - tokens_to_merge.shape[0], - tokens_to_merge.shape[1], - contextual_num, - dtype=attn_logits.dtype, - device=metric_normalized.device - ) - assign_one_hot.scatter_(2, similarity.argmax(dim=2).unsqueeze(-1), 1) - counts = assign_one_hot.sum(dim=1).clamp(min=1).unsqueeze(-1) - - select_mask = torch.zeros_like(attn_logits, dtype=torch.bool) - select_mask[topk_indices] = True - - false_pos = (~select_mask).nonzero(as_tuple=True)[0] - - select_mask[false_pos[target_indices]] = True - - img_mask = (pruning_paras['input_ids'] == pruning_paras['vision_token_index'])[0] - st_idx = torch.nonzero(img_mask, as_tuple=True)[0] - - if st_idx.numel() > 0: - first, last = st_idx[0].item(), st_idx[-1].item() - img_mask[first: last + 1] = ~select_mask - img_mask = ~img_mask - contextual_input_idx = false_pos[target_indices] + first - - hidden_states_filtered = inputs_embeds[:, first: last + 1][:, contextual_mask] - hidden_to_merge = hidden_states_filtered[ - :, - ~torch.isin( - torch.arange( - hidden_states_filtered.shape[1], - device=hidden_states_filtered.device - ), target_indices - ), - : - ] - aggregated_hidden = torch.bmm(assign_one_hot.transpose(1, 2), hidden_to_merge) / counts - target_hidden = hidden_states_filtered[:, target_indices, :] - - contextual_tokens = target_hidden + aggregated_hidden - - kwargs['position_ids'] = position_ids[:, :, img_mask] - kwargs['attention_mask'] = attention_mask[:, img_mask] - inputs_embeds[:, contextual_input_idx] = contextual_tokens - kwargs['inputs_embeds'] = inputs_embeds[:, img_mask] - del contextual_tokens, hidden_states_filtered, hidden_to_merge, aggregated_hidden - torch.cuda.empty_cache() - return args, kwargs - - if self.model.__class__.__name__ == 'Qwen2_5VL': - self.blocks[-1].attn.forward = MethodType( - get_metric(Qwen2_5_VLVisionAttention_forward, self.pruning_paras), - self.blocks[-1].attn - ) - self.model.vision_model.register_forward_hook( - functools.partial( - merger_hook, - pruning_paras=self.pruning_paras, - ), - with_kwargs=True - ) - self.model.embed_tokens.register_forward_pre_hook( - functools.partial(get_input_ids_hook, pruning_paras=self.pruning_paras) - ) - self.model.language_model.register_forward_pre_hook( - functools.partial( - prune_qwenv25vl_hook, - pruning_paras=self.pruning_paras, - ), - with_kwargs=True - ) diff --git a/llmc/compression/token_reduction/vispruner.py 
b/llmc/compression/token_reduction/vispruner.py deleted file mode 100644 index afe63fe1c..000000000 --- a/llmc/compression/token_reduction/vispruner.py +++ /dev/null @@ -1,268 +0,0 @@ -import functools -from functools import wraps -from types import MethodType - -import torch - -from llmc.utils.registry_factory import TOKEN_REDUCTION_REGISTRY - -from .token_reduction_module import TokenReductionModule -from .utils import get_anyres_image_grid_shape, unpad_image - - -@TOKEN_REDUCTION_REGISTRY.register('VisPruner') -class VisPruner(TokenReductionModule): - def __init__(self, config, model, blocks): - super().__init__(config, model, blocks) - self.add_sparse_config() - self.register_reduction_modules() - - def add_sparse_config(self): - self.special_config['select_layer'] = self.model.pruning_config.get( - 'select_layer', -1 - ) - self.special_config['select_feature'] = self.model.pruning_config.get( - 'select_feature', None - ) - - self.pruning_paras = self.special_config - - def register_reduction_modules(self): - - def change_images_hook(fn, pruning_paras): - @wraps(fn) - def wrapper(self, *args, **kwargs): - images = args[5] - input_ids = args[0] - vision_tower = self.get_vision_tower() - - if vision_tower is None or images is None or input_ids.shape[1] == 1: - return fn(*args, **kwargs) - - if images.ndim == 5: - args = list(args) - concat_images = torch.cat([image for image in images], dim=0) - args[5] = concat_images.unsqueeze(dim=0).unsqueeze(dim=0) - pruning_paras['image_sizes'] = kwargs['image_sizes'] - pruning_paras['num_patches_per_side'] = vision_tower.num_patches_per_side - if hasattr(vision_tower, 'image_size'): - pruning_paras['vision_tower_image_size'] = vision_tower.image_size - else: - pruning_paras['vision_tower_image_size'] = None - pruning_paras['image_newline'] = self.model.image_newline - - return fn(*tuple(args), **kwargs) - else: - return fn(*args, **kwargs) - return wrapper - - def update_output_attentions_hook(module, args, kwargs): - args = list(args) - if args[0].ndim == 6: - args[0] = args[0].squeeze(dim=0).squeeze(dim=0) - kwargs['output_attentions'] = True - return tuple(args), kwargs - - def store_attention_hook(module, inps, outs, pruning_paras): - image_attentions = outs.attentions[pruning_paras['select_layer']] - if pruning_paras['select_feature'] == 'patch': - image_attentions = image_attentions[:, :, 0, 1:] - elif pruning_paras['select_feature'] == 'cls_patch': - image_attentions = image_attentions - else: - raise ValueError(f"Unexpected select feature: {pruning_paras['select_feature']}") - - pruning_paras['image_attentions'] = image_attentions.to(inps[0].dtype) - - def get_index_masks_hook(module, args, pruning_paras): - image_features = args[0] - image_attentions = pruning_paras['image_attentions'] - - B, N, C = image_features.shape - device = image_features.device - index_masks = torch.ones(B, N, dtype=torch.bool, device=device) - visual_token_num = round(N * (1 - self.special_config['prune_ratio'])) # T - important_ratio = self.pruning_paras['important_ratio'] # r - important_token_num = int(visual_token_num * important_ratio) # T_imp = T * r - diverse_token_num = visual_token_num - important_token_num # T_div = T * (1 - r) - - # [VisPruner] Select important tokens using attention scores - image_attentions = image_attentions.mean(dim=1) # (B, N) - token_indices = image_attentions.argsort(dim=-1, descending=True) # (B, N) - important_indices = token_indices[:, :important_token_num] # (B, T_imp) - residual_indices = token_indices[:, important_token_num:] # (B, 
N - T_imp) - - # [VisPruner] Remove duplicate tokens by iterative matching and pruning - image_normalized = image_features / image_features.norm(dim=-1, keepdim=True) - while diverse_token_num > 0: - R = residual_indices.shape[1] - r = min(8, R - diverse_token_num) - if r <= 0: - break - - residual_tokens = image_normalized[ - torch.arange(B).unsqueeze(-1).expand(-1, R), - residual_indices - ] # (B, R, C) - a, b = residual_tokens[..., ::2, :], residual_tokens[..., 1::2, :] # (B, R // 2, C) - scores = a @ b.transpose(-1, -2) # (B, R // 2, R // 2) - scores = scores.max(dim=-1).values # (B, R // 2) - - distinct_indices = scores.argsort(dim=-1, descending=True)[:, r:] # (B, R // 2 - r) - residual_indices = torch.cat([ - residual_indices[..., ::2][ - torch.arange(B).unsqueeze(-1).expand(-1, R // 2 - r), - distinct_indices - ], - residual_indices[..., 1::2] - ], dim=-1) # (B, R - r) - - if diverse_token_num > 0: - selected_indices = torch.cat([important_indices, residual_indices], dim=-1) - else: - selected_indices = important_indices # (B, T) - index_masks = torch.zeros(B, N, dtype=torch.bool, device=device) - index_masks.scatter_(1, selected_indices, True) - - pruning_paras['index_masks'] = index_masks - - def prune_hook(module, inputs, outputs, pruning_paras, model_config): - image_features = outputs - index_masks = pruning_paras['index_masks'] - - if image_features.shape[0] == 1: - return image_features[index_masks].unsqueeze(0) - - image_sizes = pruning_paras['image_sizes'] - split_sizes = [image_features.shape[0]] - image_features = torch.split(image_features, split_sizes, dim=0) - index_masks = torch.split(index_masks, split_sizes, dim=0) - # 'spatial_unpad', 'anyres' - mm_patch_merge_type = getattr(model_config, 'mm_patch_merge_type', 'flat') - # mm_patch_merge_type = mm_patch_merge_type.replace('_unpad', '') - image_aspect_ratio = getattr(model_config, 'image_aspect_ratio', 'square') - - if mm_patch_merge_type == 'flat': - image_features = [x.flatten(0, 1) for x in image_features] - index_masks = [x.flatten(0, 1) for x in index_masks] - image_features = [x[m] for x, m in zip(image_features, index_masks)] - elif mm_patch_merge_type.startswith('spatial'): - new_image_features = [] - for image_idx, (image_feature, index_mask) in enumerate( - zip(image_features, index_masks) - ): - if image_feature.shape[0] > 1: - base_image_feature, base_index_mask = image_feature[0], index_mask[0] - image_feature, index_mask = image_feature[1:], index_mask[1:] - height = width = pruning_paras['num_patches_per_side'] - assert height * width == base_image_feature.shape[0] - - if image_aspect_ratio == 'anyres': - if pruning_paras['vision_tower_image_size'] is not None: - vision_tower_image_size = pruning_paras['vision_tower_image_size'] - else: - raise ValueError( - 'vision_tower_image_size is not found in the vision tower.' 
- ) - try: - num_patch_width, num_patch_height = get_anyres_image_grid_shape( - image_sizes[image_idx], - model_config.image_grid_pinpoints, - vision_tower_image_size - ) - except Exception: - num_patch_width, num_patch_height = 2, 2 - image_feature = image_feature.view( - num_patch_height, num_patch_width, height, width, -1 - ) - index_mask = index_mask.view( - num_patch_height, num_patch_width, height, width - ) - else: - raise NotImplementedError - - if 'maxpool2x2' in mm_patch_merge_type: - raise NotImplementedError - elif 'unpad' in mm_patch_merge_type and 'anyres_max' in image_aspect_ratio: - raise NotImplementedError - elif 'unpad' in mm_patch_merge_type: - image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous() - image_feature = image_feature.flatten(1, 2).flatten(2, 3) - image_feature = unpad_image(image_feature, image_sizes[image_idx]) - image_feature = torch.cat(( - image_feature, - pruning_paras['image_newline'][:, None, None].expand( - *image_feature.shape[:-1], 1 - ).to(image_feature.device) - ), dim=-1) - image_feature = image_feature.flatten(1, 2).transpose(0, 1) - index_mask = index_mask.permute(0, 2, 1, 3).contiguous().unsqueeze(0) - index_mask = index_mask.flatten(1, 2).flatten(2, 3) - index_mask = unpad_image(index_mask, image_sizes[image_idx]) - index_mask = torch.cat(( - index_mask, - torch.ones( - *index_mask.shape[:-1], 1, dtype=torch.bool - ).to(index_mask.device) - ), dim=-1) - index_mask = index_mask.flatten(1, 2).squeeze(0) - image_feature = image_feature[index_mask] - else: - image_feature = image_feature.permute(0, 2, 1, 3, 4).contiguous() - image_feature = image_feature.flatten(0, 3) - index_mask = index_mask.permute(0, 2, 1, 3).contiguous() - index_mask = index_mask.flatten(0, 3) - image_feature = image_feature[index_mask] - if 'nobase' in mm_patch_merge_type: - raise NotImplementedError - else: - base_image_feature = base_image_feature[base_index_mask] - image_feature = torch.cat((base_image_feature, image_feature), dim=0) - else: # single image operations - image_feature = image_feature[0] - index_mask = index_mask[0] - if 'unpad' in mm_patch_merge_type: - image_feature = torch.cat(( - image_feature, - pruning_paras['image_newline'][None] - ), dim=0) - index_mask = torch.cat(( - index_mask, - torch.ones(1, dtype=torch.bool).to(index_mask.device) - ), dim=0) - image_feature = image_feature[index_mask] - new_image_features.append(image_feature) - image_features = new_image_features - else: - raise ValueError( - f'Unexpected mm_patch_merge_type: {model_config.mm_patch_merge_type}' - ) - return image_features - - self.model.vlm_model.prepare_inputs_labels_for_multimodal = MethodType( - change_images_hook( - self.model.vlm_model.prepare_inputs_labels_for_multimodal, - self.pruning_paras - ), - self.model.vlm_model - ) - - self.model.vision_model.vision_tower.register_forward_pre_hook( - update_output_attentions_hook, - with_kwargs=True - ) - - self.model.vision_model.vision_tower.register_forward_hook( - functools.partial(store_attention_hook, pruning_paras=self.pruning_paras), - ) - - self.model.vision_projector.register_forward_pre_hook( - functools.partial(get_index_masks_hook, pruning_paras=self.pruning_paras), - ) - - self.model.vision_projector.register_forward_hook( - functools.partial( - prune_hook, - pruning_paras=self.pruning_paras, - model_config=self.model.vlm_model_config - ), - ) diff --git a/llmc/compression/token_reduction/visualizer.py b/llmc/compression/token_reduction/visualizer.py deleted file mode 100644 index 732b901a8..000000000 
--- a/llmc/compression/token_reduction/visualizer.py +++ /dev/null @@ -1,78 +0,0 @@ -import functools - -from llmc.utils.registry_factory import TOKEN_REDUCTION_REGISTRY -from llmc.utils.visualizer import (visualize_grid_to_grid, visualize_heads, - visualize_kept_patches) - -from .token_reduction_module import TokenReductionModule -from .utils import prefill_wrapper - - -@TOKEN_REDUCTION_REGISTRY.register('Visualizer') -class Visualizer(TokenReductionModule): - def __init__(self, config, model, blocks): - super().__init__(config, model, blocks) - self.add_sparse_config() - self.register_reduction_modules() - - def add_sparse_config(self): - self.pruning_paras = self.special_config - self.pruning_paras['attentions'] = [] - - def register_reduction_modules(self): - - @prefill_wrapper - def update_attentions_hook(module, args, kwargs): - kwargs['output_attentions'] = True - return args, kwargs - - @prefill_wrapper - def get_images_hook(module, input_args, pruning_paras): - pruning_paras['images'] = input_args[0] - return input_args - - @prefill_wrapper - def get_attentions_hook(module, inps, layer_outs, pruning_paras): - pruning_paras['attentions'].append(layer_outs[1]) - return layer_outs - - @prefill_wrapper - def visualizer_hook(module, inps, layer_outs, pruning_paras): - attention_maps = pruning_paras['attentions'][0] - visual_attention_maps = attention_maps[:, :, 35: 35 + 576, 35: 35 + 576] - image = pruning_paras['images'][0] - - visualize_heads( - visual_attention_maps[:, :6], - cols=4, - save_path='' - ) - visualize_grid_to_grid( - visual_attention_maps[0, 31, :, :], - 300, - image, - grid_size=24, - save_path='' - ) - visualize_kept_patches( - pruning_paras['images'][0], - pruning_paras['visual_keep_indexs'], - save_path='', - ) - return layer_outs - - self.model.vision_model.register_forward_pre_hook( - functools.partial(get_images_hook, pruning_paras=self.pruning_paras), - ) - - for idx, blk in enumerate(self.blocks): - if idx == 5: - blk.register_forward_pre_hook(update_attentions_hook, with_kwargs=True) - blk.register_forward_hook( - functools.partial(get_attentions_hook, pruning_paras=self.pruning_paras), - ) - if idx == (len(self.blocks) - 1): - # self.model.language_model.layers[-1] - blk.register_forward_hook( - functools.partial(visualizer_hook, pruning_paras=self.pruning_paras), - ) diff --git a/llmc/data/__init__.py b/llmc/data/__init__.py deleted file mode 100644 index 29b8c7e37..000000000 --- a/llmc/data/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .dataset import BaseDataset diff --git a/llmc/data/dataset/__init__.py b/llmc/data/dataset/__init__.py deleted file mode 100644 index b1933afee..000000000 --- a/llmc/data/dataset/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .base_dataset import BaseDataset diff --git a/llmc/data/dataset/base_dataset.py b/llmc/data/dataset/base_dataset.py deleted file mode 100755 index 7af3de73a..000000000 --- a/llmc/data/dataset/base_dataset.py +++ /dev/null @@ -1,223 +0,0 @@ -import json -import os -from abc import ABCMeta - -import torch -from datasets import load_dataset, load_from_disk -from loguru import logger -from PIL import Image -from torch.nn import functional as F - -from .specified_preproc import PREPROC_REGISTRY - - -class BaseDataset(metaclass=ABCMeta): - def __init__(self, tokenizer, calib_cfg, batch_process=None): - # calib_cfg - logger.info(f'calib_cfg : {calib_cfg}') - self.tokenizer = tokenizer - self.batch_process = batch_process - self.calib_dataset_name = calib_cfg['name'] - self.padding = calib_cfg.get('padding', 
False) - if self.calib_dataset_name == 'ultrachat': - assert self.padding - self.download = calib_cfg['download'] - self.load_from_txt = calib_cfg.get('load_from_txt', False) - self.calib_dataset_path = calib_cfg.get('path', None) - self.apply_chat_template = calib_cfg.get('apply_chat_template', False) - self.n_samples = calib_cfg.get('n_samples', None) - self.calib_bs = calib_cfg['bs'] - if self.calib_dataset_name in ['t2v', 'i2v']: - assert self.calib_bs == 1 - self.seq_len = calib_cfg.get('seq_len', None) - self.preproc = calib_cfg.get('preproc', False) - if self.calib_dataset_name == 'ultrachat': - assert self.preproc == 'ultrachat_general' - if self.preproc == 'original_txt': - assert self.seq_len is None - self.seed = calib_cfg['seed'] - self.calib_dataset_field_map = { - 'pileval': 'text', - 'c4': 'text', - 'wikitext2': 'text', - 'ptb': 'sentence', - } - if self.calib_dataset_name in self.calib_dataset_field_map: - self.key = self.calib_dataset_field_map[self.calib_dataset_name] - self.build_calib_dataset() - - def build_calib_dataset(self): - if self.download: - if self.calib_dataset_name == 'pileval': - self.calib_dataset = load_dataset( - 'mit-han-lab/pile-val-backup', split='validation' - ) - elif self.calib_dataset_name == 'c4': - self.calib_dataset = load_dataset( - 'allenai/c4', - data_files={'train': 'en/c4-train.00000-of-01024.json.gz'}, - split='train', - ) - elif self.calib_dataset_name == 'wikitext2': - self.calib_dataset = load_dataset( - 'wikitext', 'wikitext-2-raw-v1', split='train' - ) - elif self.calib_dataset_name == 'ptb': - self.calib_dataset = load_dataset( - 'ptb_text_only', 'penn_treebank', split='train' - ) - elif self.calib_dataset_name == 'ultrachat': - self.calib_dataset = load_dataset( - 'HuggingFaceH4/ultrachat_200k', split='train_sft' - ) - else: - raise Exception(f'Not support {self.calib_dataset_name} dataset.') - else: - if self.calib_dataset_name in [ - 'custom_txt', - 'custom_mm', - 'images', - 't2v', - 'i2v', - ]: - self.calib_dataset = self.get_custom_dataset(self.calib_dataset_path) - else: - self.calib_dataset = load_from_disk(self.calib_dataset_path) - - def get_calib_model_inputs(self, samples): - if not self.padding: - if self.calib_dataset_name in ['t2v', 'i2v']: - calib_model_inputs = samples - elif self.calib_dataset_name == 'images': - calib_model_inputs = self.get_batch_process(samples) - else: - assert not self.calib_dataset_name == 'custom_mm' - if self.calib_dataset_name == 'custom_txt': - txts = self.batch_process( - samples, - calib_or_eval='calib', - apply_chat_template=self.apply_chat_template, - return_inputs=False, - ) - else: - txts = self.calib_dataset - preproc = PREPROC_REGISTRY[self.preproc] - preproc_param_dict = { - 'calib_dataset': txts, - 'tokenizer': self.tokenizer, - 'n_samples': self.n_samples, - 'seq_len': self.seq_len, - } - if self.preproc == 'txt_general_preproc': - preproc_param_dict['key'] = self.key - samples = preproc(**preproc_param_dict) - calib_model_inputs = [] - if self.calib_bs == -1: - batch = torch.cat(samples, dim=0) - calib_model_inputs.append({'input_ids': batch}) - elif self.calib_bs == 1: - for i in range(len(samples)): - calib_model_inputs.append({'input_ids': samples[i]}) - elif self.calib_bs > 1: - for i in range(0, len(samples), self.calib_bs): - start = i - end = min(i + self.calib_bs, len(samples)) - batch = samples[start:end] - batch = torch.cat(batch, dim=0) - calib_model_inputs.append({'input_ids': batch}) - else: - assert ( - self.calib_dataset_name == 'custom_txt' - or 
self.calib_dataset_name == 'custom_mm' - ) - calib_model_inputs = self.get_batch_process(samples) - return calib_model_inputs - - def get_batch_process(self, samples): - calib_model_inputs = [] - if self.calib_bs == -1: - calib_model_inputs.append( - self.batch_process( - samples, - calib_or_eval='calib', - apply_chat_template=self.apply_chat_template, - ) - ) - elif self.calib_bs == 1: - calib_model_inputs = [ - self.batch_process( - [sample], - calib_or_eval='calib', - apply_chat_template=self.apply_chat_template, - ) - for sample in samples - ] - elif self.calib_bs > 1: - for i in range(0, len(samples), self.calib_bs): - start = i - end = min(i + self.calib_bs, len(samples)) - batch = samples[start:end] - calib_model_inputs.append( - self.batch_process( - batch, - calib_or_eval='calib', - apply_chat_template=self.apply_chat_template, - ) - ) - return calib_model_inputs - - def get_calib_dataset(self): - samples = self.calib_dataset[ - int(os.environ['RANK'])::int(os.environ['WORLD_SIZE']) - ] - logger.info(f'len(samples) rank : {len(samples)}') - - calib_model_inputs = self.get_calib_model_inputs(samples) - logger.info(f'len(calib_model_inputs) : {len(calib_model_inputs)}') - if self.padding: - padding_mask = [ - calib_model_input['attention_mask'] - for calib_model_input in calib_model_inputs - ] - else: - padding_mask = None - return calib_model_inputs, padding_mask - - def get_custom_dataset(self, custom_dataset_path): - audio_img_qa_json = os.path.join(custom_dataset_path, 'samples.json') - fp = open(audio_img_qa_json) - custom_data_samples = json.load(fp) - for idx in range(len(custom_data_samples)): - if 'audio' in custom_data_samples[idx]: - if isinstance(custom_data_samples[idx]['audio'], list): - for audio_idx in range(len(custom_data_samples[idx]['audio'])): - custom_data_samples[idx]['audio'][audio_idx] = os.path.join( - custom_dataset_path, custom_data_samples[idx]['audio'][audio_idx] - ) - else: - custom_data_samples[idx]['audio'] = os.path.join( - custom_dataset_path, custom_data_samples[idx]['audio'] - ) - else: - custom_data_samples[idx]['audio'] = None - if 'image' in custom_data_samples[idx]: - if isinstance(custom_data_samples[idx]['image'], list): - for img_idx in range(len(custom_data_samples[idx]['image'])): - custom_data_samples[idx]['image'][img_idx] = os.path.join( - custom_dataset_path, custom_data_samples[idx]['image'][img_idx] - ) - else: - custom_data_samples[idx]['image'] = os.path.join( - custom_dataset_path, custom_data_samples[idx]['image'] - ) - else: - custom_data_samples[idx]['image'] = None - if 'question' not in custom_data_samples[idx]: - custom_data_samples[idx]['question'] = '' - if 'answer' not in custom_data_samples[idx]: - custom_data_samples[idx]['answer'] = '' - if 'prompt' not in custom_data_samples[idx]: - custom_data_samples[idx]['prompt'] = '' - if 'negative_prompt' not in custom_data_samples[idx]: - custom_data_samples[idx]['negative_prompt'] = '' - return custom_data_samples diff --git a/llmc/data/dataset/specified_preproc.py b/llmc/data/dataset/specified_preproc.py deleted file mode 100644 index a996fead3..000000000 --- a/llmc/data/dataset/specified_preproc.py +++ /dev/null @@ -1,169 +0,0 @@ -import json -import os -import random - -import torch - -from llmc.utils.registry_factory import PREPROC_REGISTRY - - -@PREPROC_REGISTRY -def wikitext2_gptq(calib_dataset, tokenizer, n_samples, seq_len): - trainenc = tokenizer('\n\n'.join(calib_dataset['text']), return_tensors='pt') - samples = [] - for _ in range(n_samples): - i = 
random.randint(0, trainenc.input_ids.shape[1] - seq_len - 1) - j = i + seq_len - inp = trainenc.input_ids[:, i:j] - samples.append(inp) - return samples - - -@PREPROC_REGISTRY -def ptb_gptq(calib_dataset, tokenizer, n_samples, seq_len): - trainenc = tokenizer(' '.join(calib_dataset['sentence']), return_tensors='pt') - samples = [] - for _ in range(n_samples): - i = random.randint(0, trainenc.input_ids.shape[1] - seq_len - 1) - j = i + seq_len - inp = trainenc.input_ids[:, i:j] - samples.append(inp) - return samples - - -@PREPROC_REGISTRY -def c4_gptq(calib_dataset, tokenizer, n_samples, seq_len): - samples = [] - for _ in range(n_samples): - while True: - i = random.randint(0, len(calib_dataset) - 1) - trainenc = tokenizer(calib_dataset[i]['text'], return_tensors='pt') - if trainenc.input_ids.shape[1] >= seq_len: - break - i = random.randint(0, trainenc.input_ids.shape[1] - seq_len - 1) - j = i + seq_len - inp = trainenc.input_ids[:, i:j] - samples.append(inp) - return samples - - -@PREPROC_REGISTRY -def pileval_awq(calib_dataset, tokenizer, n_samples, seq_len): - dataset = calib_dataset.shuffle(seed=42) - samples = [] - n_run = 0 - for data in dataset: - line = data['text'] - line = line.strip() - line_encoded = tokenizer.encode(line) - if len(line_encoded) > seq_len: - continue - sample = torch.tensor([line_encoded]) - if sample.numel() == 0: - continue - samples.append(sample) - n_run += 1 - if n_run == n_samples: - break - samples = torch.cat(samples, dim=1) - n_split = samples.shape[1] // seq_len - samples = [samples[:, i * seq_len: (i + 1) * seq_len] for i in range(n_split)] - return samples - - -@PREPROC_REGISTRY -def pileval_smooth(calib_dataset, tokenizer, n_samples, seq_len): - dataset = calib_dataset.shuffle(seed=42) - samples = [] - n_run = 0 - for data in dataset: - line = data['text'] - trainenc = tokenizer( - line, return_tensors='pt', max_length=seq_len, truncation=True - ) - line_encoded = trainenc.input_ids - samples.append(line_encoded) - n_run += 1 - if n_run == n_samples: - break - return samples - - -@PREPROC_REGISTRY -def pileval_omni(calib_dataset, tokenizer, n_samples, seq_len): - trainenc = tokenizer('\n\n'.join(calib_dataset['text'][:1000]), return_tensors='pt') - samples = [] - for _ in range(n_samples): - i = random.randint(0, trainenc.input_ids.shape[1] - seq_len - 1) - j = i + seq_len - inp = trainenc.input_ids[:, i:j] - samples.append(inp) - return samples - - -@PREPROC_REGISTRY -def img_general(calib_dataset, tokenizer, batch_process, n_samples): - random.shuffle(calib_dataset) - if len(calib_dataset) > n_samples: - calib_dataset = calib_dataset[:n_samples] - samples = batch_process(calib_dataset) - return samples - - -@PREPROC_REGISTRY -def random_truncate_txt(calib_dataset, tokenizer, n_samples, seq_len): - random.shuffle(calib_dataset) - trainenc = tokenizer('\n\n'.join(calib_dataset), return_tensors='pt') - samples = [] - for _ in range(n_samples): - i = random.randint(0, trainenc.input_ids.shape[1] - seq_len - 1) - j = i + seq_len - inp = trainenc.input_ids[:, i:j] - samples.append(inp) - return samples - - -@PREPROC_REGISTRY -def ultrachat_general(calib_dataset, tokenizer, n_samples, seq_len): - calib_dataset = calib_dataset.shuffle(seed=42).select(range(n_samples)) - texts = [] - samples = [] - for example in calib_dataset: - text = tokenizer.apply_chat_template( - example['messages'], - tokenize=False, - ) - texts.append(text) - - for i in range(n_samples): - trainenc = tokenizer( - texts[i], - padding=False, - max_length=seq_len, - truncation=True, 
- add_special_tokens=False, - return_tensors='pt' - ) - inp = trainenc.input_ids - samples.append(inp) - return samples - - -@PREPROC_REGISTRY -def txt_general_preproc(calib_dataset, tokenizer, n_samples, seq_len, key): - dataset = calib_dataset.shuffle(seed=42) - samples = [] - n_run = 0 - for data in dataset: - line = data[key] - trainenc = tokenizer( - line, return_tensors='pt', max_length=seq_len, truncation=True - ) - line_encoded = trainenc.input_ids - if line_encoded.shape[1] < seq_len: - continue - samples.append(line_encoded) - n_run += 1 - if n_run == n_samples: - break - return samples diff --git a/llmc/eval/__init__.py b/llmc/eval/__init__.py deleted file mode 100755 index 74ec5d15c..000000000 --- a/llmc/eval/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .eval_acc import AccuracyEval -from .eval_code import HumanEval -from .eval_custom_generate import CustomGenerate -from .eval_custom_generate_just_infer import CustomGenerateJustInfer -from .eval_ppl import DecodePerplexityEval, PerplexityEval -from .eval_token_consist import TokenConsistencyEval -from .eval_video_generate import VideoGenerateEval -from .eval_vqa import VQAEval diff --git a/llmc/eval/eval_acc.py b/llmc/eval/eval_acc.py deleted file mode 100755 index e8eeb11b1..000000000 --- a/llmc/eval/eval_acc.py +++ /dev/null @@ -1,61 +0,0 @@ -import gc - -import torch -from loguru import logger -from torch.utils.data import DataLoader -from torchvision import transforms -from torchvision.datasets import ImageFolder - - -class AccuracyEval: - def __init__(self, config): - self.eval_config = config.eval - self.imagenet_root = self.eval_config['path'] - self.bs = self.eval_config['bs'] - self.num_workers = self.eval_config.get('num_workers', 8) - self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - - def load_imagenet(self): - val_transform = transforms.Compose([ - transforms.Resize(256), - transforms.CenterCrop(224), - ]) - val_dataset = ImageFolder(root=self.imagenet_root, transform=val_transform) - val_loader = DataLoader( - val_dataset, - batch_size=self.bs, - shuffle=False, - num_workers=self.num_workers, - collate_fn=lambda x: x, - pin_memory=True - ) - return val_loader - - def eval(self, model, eval_pos=None): - self.model = model.get_model() - self.processor = model.processor - self.model.eval() - self.model.to(self.device) - - val_loader = self.load_imagenet() - correct = 0 - total = 0 - - with torch.no_grad(): - for batch_idx, batch in enumerate(val_loader): - logger.info(f'Processed batch {batch_idx + 1}/{len(val_loader)}') - imgs, labels = zip(*batch) - labels = torch.tensor(labels).to(self.device) - inputs = self.processor(images=list(imgs), return_tensors='pt') - inputs = {k: v.to(self.device) for k, v in inputs.items()} - outputs = self.model(**inputs) - _, predicted = torch.max(outputs.logits, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - accuracy = 100 * correct / total - - self.model.cpu() - gc.collect() - torch.cuda.empty_cache() - - return accuracy diff --git a/llmc/eval/eval_base.py b/llmc/eval/eval_base.py deleted file mode 100755 index 7916b2059..000000000 --- a/llmc/eval/eval_base.py +++ /dev/null @@ -1,233 +0,0 @@ -import gc -import json -import os -from concurrent.futures import ThreadPoolExecutor - -import torch -import torch.nn as nn -from datasets import load_dataset, load_from_disk -from human_eval.data import read_problems -from loguru import logger - - -class BaseEval: - def __init__(self, model, config): - self.model = model - 
self.config = config - self.tokenizer = self.model.get_tokenizer() - # eval_cfg - self.eval_cfg = config.eval - self.model_type = config.model.type - logger.info(f'eval_cfg : {self.eval_cfg}') - self.eval_dataset_name = self.eval_cfg['name'] - self.dataset_type = self.eval_cfg.get('type', 'ppl') - assert self.eval_dataset_name in [ - 'wikitext2', - 'c4', - 'ptb', - 'custom', - 'human_eval', - 'mme', - 'custom_ppl', - 'custom_gen', - 't2v', - 'i2v', - ], f'Dataset {self.eval_dataset_name} is not supported.' - self.seq_len = self.eval_cfg.get('seq_len', None) - self.num_samples = self.eval_cfg.get('num_samples', None) - self.num_eval_tokens = self.eval_cfg.get('num_eval_tokens', None) - self.eval_dataset_bs = self.eval_cfg['bs'] - self.eval_dataset_path = self.eval_cfg.get('path', None) - self.apply_chat_template = self.eval_cfg.get('apply_chat_template', False) - self.download = self.eval_cfg.get('download', False) - self.load_from_txt = self.eval_cfg.get('load_from_txt', False) - self.inference_per_block = self.eval_cfg.get('inference_per_block', False) - self.testenc = self.build_data() - - @torch.no_grad() - def build_data(self): - # load data - if self.eval_dataset_name == 'human_eval': - testenc = read_problems() - else: - if self.download: - if self.eval_dataset_name == 'wikitext2': - testdata = load_dataset( - 'wikitext', 'wikitext-2-raw-v1', split='test' - ) - elif self.eval_dataset_name == 'c4': - testdata = load_dataset( - 'allenai/c4', - data_files={ - 'validation': 'en/c4-validation.00000-of-00008.json.gz' - }, - split='validation', - ) - elif self.eval_dataset_name == 'ptb': - testdata = load_dataset( - 'ptb_text_only', 'penn_treebank', split='test' - ) - else: - if self.eval_dataset_name in ['custom_gen', 'custom_ppl', 't2v', 'i2v']: - testdata = self.get_cutomdata(self.eval_dataset_path) - else: - assert self.eval_dataset_path, 'Please set path in eval_cfg.' 
- testdata = load_from_disk(self.eval_dataset_path) - self.testdata = testdata - # encode data - if self.dataset_type == 'decode_ppl': - assert self.eval_dataset_name == 'wikitext2' - testenc = testdata['text'] - elif self.eval_dataset_name == 'wikitext2': - testenc = self.tokenizer( - '\n\n'.join(testdata['text']), return_tensors='pt' - ) - elif self.eval_dataset_name == 'c4': - testenc = self.tokenizer( - ' '.join(testdata[:1100]['text']), return_tensors='pt' - ) - testenc.input_ids = testenc.input_ids[:, : (256 * self.seq_len)] - elif self.eval_dataset_name == 'ptb': - testenc = self.tokenizer( - ' '.join(testdata['sentence']), return_tensors='pt' - ) - elif self.eval_dataset_name == 'custom_ppl': - testenc = self.tokenizer( - '\n'.join([data['question'] + data['answer'] if 'answer' in data else data['question'] for data in testdata]), # noqa - return_tensors='pt', - ) - elif self.eval_dataset_name == 'custom_gen': - testenc = [] - if self.eval_dataset_bs < 0: - testenc.append( - self.model.batch_process( - testdata, - calib_or_eval='eval', - apply_chat_template=self.apply_chat_template - ) - ) - elif self.eval_dataset_bs == 1: - testenc = [ - self.model.batch_process( - [sample], - calib_or_eval='eval', - apply_chat_template=self.apply_chat_template - ) - for sample in testdata - ] # noqa - elif self.eval_dataset_bs > 1: - for i in range(0, len(testdata), self.eval_dataset_bs): - start = i - end = min(i + self.eval_dataset_bs, len(testdata)) - batch = testdata[start:end] - testenc.append( - self.model.batch_process( - batch, - calib_or_eval='eval', - apply_chat_template=self.apply_chat_template - ) - ) - elif self.eval_dataset_name in ['t2v', 'i2v']: - testenc = self.testdata - return testenc - - def get_cutomdata(self, custom_dataset): - audio_img_qa_json = os.path.join(custom_dataset, 'samples.json') - fp = open(audio_img_qa_json) - custom_data_samples = json.load(fp) - for idx in range(len(custom_data_samples)): - if 'audio' in custom_data_samples[idx]: - if isinstance(custom_data_samples[idx]['audio'], list): - for audio_idx in range(len(custom_data_samples[idx]['audio'])): - custom_data_samples[idx]['audio'][audio_idx] = os.path.join( - custom_dataset, custom_data_samples[idx]['audio'][audio_idx] - ) - else: - custom_data_samples[idx]['audio'] = os.path.join( - custom_dataset, custom_data_samples[idx]['audio'] - ) - else: - custom_data_samples[idx]['audio'] = None - if 'image' in custom_data_samples[idx]: - if isinstance(custom_data_samples[idx]['image'], list): - for img_idx in range(len(custom_data_samples[idx]['image'])): - custom_data_samples[idx]['image'][img_idx] = os.path.join( - custom_dataset, custom_data_samples[idx]['image'][img_idx] - ) - else: - custom_data_samples[idx]['image'] = os.path.join( - custom_dataset, custom_data_samples[idx]['image'] - ) - else: - custom_data_samples[idx]['image'] = None - if 'question' not in custom_data_samples[idx]: - custom_data_samples[idx]['question'] = '' - if 'answer' not in custom_data_samples[idx]: - custom_data_samples[idx]['answer'] = '' - if 'prompt' not in custom_data_samples[idx]: - custom_data_samples[idx]['prompt'] = '' - if 'negative_prompt' not in custom_data_samples[idx]: - custom_data_samples[idx]['negative_prompt'] = '' - return custom_data_samples - - @torch.no_grad() - def forward_pre_hook(self, m, x): - m.cuda() - - @torch.no_grad() - def forward_hook(self, m, x, y): - with ThreadPoolExecutor() as executor: - executor.submit(self.load_layer_to_cpu, m) - - @torch.no_grad() - def load_layer_to_cpu(self, m): - m.cpu() 
- - def register_hooks(self, model): - handles = [] - for layer in model.get_blocks(): - handles.append(layer.register_forward_pre_hook(self.forward_pre_hook)) - for layer in model.get_blocks(): - handles.append(layer.register_forward_hook(self.forward_hook)) - for layer in model.get_layers_except_blocks(): - layer.cuda() - return handles - - @torch.no_grad() - def eval(self, model_llmc, eval_pos=None): - handles = [] - if self.inference_per_block: - handles = self.register_hooks(model_llmc) - else: - if model_llmc.mm_model: - model_llmc.mm_model.cuda() - else: - model_llmc.model.cuda() - - if model_llmc.mm_model: - model_llmc.mm_model.eval() - else: - model_llmc.model.eval() - - eval_res = self.eval_func( - model_llmc, - self.testenc, - self.seq_len, - self.eval_dataset_bs, - eval_pos, - ) - if self.inference_per_block: - for h in handles: - h.remove() - - if model_llmc.mm_model: - model_llmc.mm_model.cpu() - else: - model_llmc.model.cpu() - - gc.collect() - torch.cuda.empty_cache() - return eval_res - - def post_process(self, testenc): - pass diff --git a/llmc/eval/eval_code.py b/llmc/eval/eval_code.py deleted file mode 100644 index a9dcf50fe..000000000 --- a/llmc/eval/eval_code.py +++ /dev/null @@ -1,149 +0,0 @@ -import glob -import os - -import torch -from human_eval.data import stream_jsonl, write_jsonl -from human_eval.evaluation import evaluate_functional_correctness -from loguru import logger -from tqdm import tqdm - -from .eval_base import BaseEval - - -class HumanEval(BaseEval): - def __init__(self, model, config): - super().__init__(model, config) - self.res_path = self.eval_cfg.get('res_path', None) - assert self.res_path is not None - os.makedirs(self.res_path, exist_ok=True) - self.format_tabs = self.eval_cfg.get('format_tabs', False) - self.instruction = self.eval_cfg.get('instruction', - 'Complete the following Python code:') - self.add_chat_temp = self.eval_cfg.get('add_chat_temp', False) - - @torch.no_grad() - def eval_func(self, model, testenc, seq_len, bs, eval_pos): - samples = [] - pbar = tqdm(total=len(testenc) * bs, dynamic_ncols=True, position=0, desc='Evaluating') - - for task_id in testenc: - if self.format_tabs: - prompt = testenc[task_id]['prompt'].replace(' ', '\t') - else: - prompt = testenc[task_id]['prompt'] - prompt = self.gen_prompt(prompt) - batch_completions = self.generate_batch_completion( - model, prompt, bs - ) - - for sample in batch_completions: - result = dict( - task_id=task_id, - completion=sample, - ) - samples += [result] - - pbar.update(bs) - - pbar.close() - - self.output_dir = os.path.join(self.res_path, eval_pos) - - os.makedirs(self.output_dir, exist_ok=True) - out_path = os.path.join(self.output_dir, 'eval.jsonl') - write_jsonl(out_path, samples) - - res = self.post_process(testenc) - return res - - def gen_prompt(self, prompt): - prompt = self.instruction + '\n' + prompt - if self.model_type in ['Starcoder']: - prompt = '' + prompt + '' - - if self.add_chat_temp: - chat_prompt = [{'role': 'user', 'content': prompt}] - chat_prompt = self.tokenizer.apply_chat_template( - chat_prompt, - tokenize=False, - ) - return chat_prompt - - return prompt - - @torch.no_grad() - def generated( - self, - model, - inputs, - max_new_tokens=512, - temperature=0.2, - top_p=0.95, - do_sample=True, - ): - - if hasattr(self.tokenizer, 'pad_token_id'): - pad_token_id = self.tokenizer.pad_token_id - else: - pad_token_id = self.tokenizer.eos_token_id - - generated_ids = model.model.generate( - **inputs, - max_new_tokens=max_new_tokens, - 
temperature=temperature, - top_p=top_p, - do_sample=do_sample, - eos_token_id=self.tokenizer.eos_token_id, - pad_token_id=pad_token_id, - use_cache=True, - ) - return generated_ids - - @torch.no_grad() - def generate_batch_completion(self, model, prompt, bs): - input_batch = [prompt for _ in range(bs)] - inputs = self.tokenizer(input_batch, return_tensors='pt').to(model.model.device) - input_ids_cutoff = inputs.input_ids.size(dim=1) - - generated_ids = self.generated(model, inputs) - model.reset_kv() - - batch_completions = self.tokenizer.batch_decode( - [ids[input_ids_cutoff:] for ids in generated_ids], - skip_special_tokens=True, - ) - - return [ - self.filter_code(self.fix_indents(completion)) - for completion in batch_completions - ] - - @torch.no_grad() - def post_process(self, testenc): - files = sorted(glob.glob(os.path.join(self.output_dir, 'eval.jsonl'))) - logger.info(f'{len(files)} files in {self.output_dir}') - output = [] - - for code_file in tqdm(files, total=len(files)): - codes = [c for c in stream_jsonl(code_file)] - output += codes - - out_path = os.path.join(self.output_dir, 'processed.jsonl') - logger.info(f'save to {out_path}') - write_jsonl(out_path, output) - res = self.entry_point(out_path) - return res - - @torch.no_grad() - def filter_code(self, completion): - completion = completion.lstrip('\n') - return completion.split('\n\n')[0] - - @torch.no_grad() - def fix_indents(self, text): - return text.replace('\t', ' ') - - @torch.no_grad() - def entry_point(self, sample_file): - results = evaluate_functional_correctness(sample_file) - return results diff --git a/llmc/eval/eval_custom_generate.py b/llmc/eval/eval_custom_generate.py deleted file mode 100644 index 4bcdda6b5..000000000 --- a/llmc/eval/eval_custom_generate.py +++ /dev/null @@ -1,59 +0,0 @@ -import glob -import os - -import torch -from human_eval.data import stream_jsonl, write_jsonl -from human_eval.evaluation import evaluate_functional_correctness -from loguru import logger -from tqdm import tqdm - -from .eval_base import BaseEval - - -class CustomGenerate(BaseEval): - def __init__(self, model, config): - super().__init__(model, config) - self.max_new_tokens = self.eval_cfg.get('max_new_tokens', 32) - - @torch.no_grad() - def eval_func(self, model, testenc, seq_len, bs, eval_pos): - responses = [] - for data in testenc: - data = { - k: (v.cuda() if torch.is_tensor(v) else v) - for k, v in data.items() - } - if model.mm_model: - generated_ids = model.mm_model.generate( - **data, - max_new_tokens=self.max_new_tokens, - do_sample=False - ) - else: - generated_ids = model.model.generate( - **data, - max_new_tokens=self.max_new_tokens, - do_sample=False - ) - response = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=False) - responses.append(response) - responses = self.flatten_2d_to_1d(responses) - assert len(responses) == len(self.testdata) - logger.info('CustomGenerate Results:') - for index in range(len(responses)): - print('*' * 10) - print(f'test data: {self.testdata[index]}') - print(f'model response: {responses[index]}') - print() - - for data in testenc: - data = { - k: (v.cpu() if torch.is_tensor(v) else v) - for k, v in data.items() - } - torch.cuda.empty_cache() - - return 'custom gen done.' 
- - def flatten_2d_to_1d(self, two_d_list): - return [item for sublist in two_d_list for item in sublist] diff --git a/llmc/eval/eval_custom_generate_just_infer.py b/llmc/eval/eval_custom_generate_just_infer.py deleted file mode 100644 index 5a7201037..000000000 --- a/llmc/eval/eval_custom_generate_just_infer.py +++ /dev/null @@ -1,67 +0,0 @@ -import json -import os - -import torch -from loguru import logger - - -class CustomGenerateJustInfer: - def __init__(self, model, config): - self.model = model - self.config = config - self.eval_cfg = config.eval - - @torch.no_grad() - def eval(self, model, eval_pos=None): - logger.info('start inference') - - with open(os.path.join(self.eval_cfg.path, 'samples.json'), 'r') as f: - questions_list = json.load(f) - - custom_samples_ans = self.model.eval_custom_samples_just_infer( - questions_list, - self.eval_cfg - ) - - self.eval_answer(custom_samples_ans) - - with open(os.path.join(self.config.save.save_path), 'w') as f: - json.dump(custom_samples_ans, f, indent=4) - - torch.cuda.empty_cache() - return 'custom gen done.' - - def eval_answer(self, data): - T1V = 0 - T1V_T2V = 0 - - def create_pairs(lst): - return [(lst[i], lst[i + 1]) for i in range(0, len(lst), 2)] - - def check_acc(gt, answer, turn): - if gt[turn].lower() in answer[turn].lower(): - return True - return False - - pair_data = create_pairs(data) - - for idx, item in enumerate(pair_data): - assert item[0]['image'] == item[1]['image'] - - pair1 = item[0] - pair2 = item[1] - - if check_acc(pair1['gt'], pair1['answer'], 0): - T1V += 1 - if check_acc(pair2['gt'], pair2['answer'], 1): - T1V_T2V += 1 - assert pair1['question'][0] == pair2['question'][1] - - if check_acc(pair2['gt'], pair2['answer'], 0): - T1V += 1 - if check_acc(pair1['gt'], pair1['answer'], 1): - T1V_T2V += 1 - assert pair2['question'][0] == pair1['question'][1] - - logger.info(f'CustomGenerateJustInfer T1V: {T1V}, T1V_T2V: {T1V_T2V}') - logger.info(f'CustomGenerateJustInfer Possibility: {T1V_T2V / T1V}') diff --git a/llmc/eval/eval_ppl.py b/llmc/eval/eval_ppl.py deleted file mode 100644 index d598218c5..000000000 --- a/llmc/eval/eval_ppl.py +++ /dev/null @@ -1,93 +0,0 @@ -import gc -from concurrent.futures import ThreadPoolExecutor - -import torch -import torch.nn as nn -from datasets import load_dataset, load_from_disk -from loguru import logger -from tqdm import tqdm - -from .eval_base import BaseEval - - -class PerplexityEval(BaseEval): - @torch.no_grad() - def eval_func(self, model, testenc, seq_len, bs, eval_pos): - testenc = testenc.input_ids - nsamples = testenc.numel() // seq_len - - nlls = [] - - # Loop through each batch - for i in range(0, nsamples, bs): - logger.info(f'index : {(i + 1) // bs}/{nsamples // bs}') - # Calculate end index - j = min(i + bs, nsamples) - - # Prepare inputs and move to gpu - inputs = testenc[:, (i * seq_len): (j * seq_len)].cuda() - inputs = inputs.reshape(j - i, seq_len) - - # Forward pass through the model - lm_logits = model.model(inputs).logits - model.reset_kv() - - shift_logits = lm_logits[:, :-1, :].contiguous() - shift_labels = inputs[:, 1:] - - # Compute loss - loss_fct = nn.CrossEntropyLoss() - loss = loss_fct( - shift_logits.reshape(-1, shift_logits.size(-1)), - shift_labels.reshape(-1), - ) - - # Calculate negative log likelihood - neg_log_likelihood = loss.float() * seq_len * (j - i) - - # Append to list of negative log likelihoods - nlls.append(neg_log_likelihood) - - # Compute perplexity - ppl = torch.exp(torch.stack(nlls).sum() / (nsamples * seq_len)) - - # Empty CUDA 
cache to save memory - testenc.cpu() - torch.cuda.empty_cache() - - return ppl.item() - - -class DecodePerplexityEval(BaseEval): - @torch.no_grad() - def eval_func(self, model, testenc, seq_len, bs, eval_pos): - num_eval_tokens = 0 - num_samples = 1 if self.num_samples is None else self.num_samples - loss_fn = torch.nn.CrossEntropyLoss(reduction='none') - nlls = [] - - for text in testenc[: num_samples]: - logger.info(text) - encodings = self.tokenizer(text, return_tensors='pt') - seq_len = encodings.input_ids.size(1) - logger.info(f'seq_len: {seq_len}') - pbar = tqdm(range(0, seq_len - 1)) - - for idx in pbar: - input_ids = encodings.input_ids[:, idx:idx + 1].cuda() - with torch.no_grad(): - outputs = model.model( - input_ids, - ) - logits = outputs.logits.view(-1, model.model.config.vocab_size) - label = encodings.input_ids[:, idx + 1:idx + 2].to(logits.device).view(-1) - neg_log_likelihood = loss_fn(logits, label) - nlls.append(neg_log_likelihood) - num_eval_tokens += 1 - if self.num_eval_tokens is not None and num_eval_tokens >= self.num_eval_tokens: - break - if self.num_eval_tokens is not None and num_eval_tokens >= self.num_eval_tokens: - break - model.reset_kv() - ppl = torch.exp(torch.stack(nlls).mean()) - return ppl.item() diff --git a/llmc/eval/eval_token_consist.py b/llmc/eval/eval_token_consist.py deleted file mode 100644 index 7901b34a2..000000000 --- a/llmc/eval/eval_token_consist.py +++ /dev/null @@ -1,72 +0,0 @@ -import torch -from loguru import logger - -from llmc.utils.registry_factory import MODEL_REGISTRY - -from .eval_base import BaseEval - - -class TokenConsistencyEval(BaseEval): - - @torch.no_grad() - def eval_func(self, model, testenc, seq_len, bs, eval_pos): - handles_origin = [] - model_origin = MODEL_REGISTRY[self.config.model.type](self.config) - if self.inference_per_block: - handles_origin = self.register_hooks(model_origin) - else: - if model_origin.mm_model: - model_origin.mm_model.cuda() - else: - model_origin.model.cuda() - - if model_origin.mm_model: - model_origin.mm_model.eval() - else: - model_origin.model.eval() - - testenc = testenc.input_ids - nsamples = testenc.numel() // seq_len - - consistent_tokens = 0 - total_tokens = 0 - - # Loop through each batch - for i in range(0, nsamples, bs): - logger.info(f'index : {(i + 1) // bs}/{nsamples // bs}') - # Calculate end index - j = min(i + bs, nsamples) - - # Prepare inputs and move to gpu - inputs = testenc[:, (i * seq_len): (j * seq_len)].cuda() - inputs = inputs.reshape(j - i, seq_len) - - # Forward pass through the models - logits1 = model_origin.model(inputs).logits - logits2 = model.model(inputs).logits - model.reset_kv() - - # Get predicted tokens - preds1 = torch.argmax(logits1, dim=-1) - preds2 = torch.argmax(logits2, dim=-1) - - consistent_tokens += (preds1 == preds2).sum().item() - total_tokens += preds1.numel() - - # Calculate consistency ratio - consistency_ratio = consistent_tokens / total_tokens - - # Empty CUDA cache to save memory - testenc.cpu() - torch.cuda.empty_cache() - - if model_origin.mm_model: - model_origin.mm_model.cpu() - else: - model_origin.model.cpu() - - if self.inference_per_block: - for h in handles_origin: - h.remove() - - return consistency_ratio diff --git a/llmc/eval/eval_video_generate.py b/llmc/eval/eval_video_generate.py deleted file mode 100755 index 0f99ff6c9..000000000 --- a/llmc/eval/eval_video_generate.py +++ /dev/null @@ -1,110 +0,0 @@ -import gc -import os - -import numpy as np -import torch -from diffusers.utils import export_to_video, load_image -from 
loguru import logger - -from llmc.utils import seed_all -from llmc.utils.registry_factory import MODEL_REGISTRY - -from .eval_base import BaseEval - - -class VideoGenerateEval(BaseEval): - - def __init__(self, model, config): - super().__init__(model, config) - self.output_video_path = self.eval_cfg.get('output_video_path', None) - assert self.output_video_path is not None - os.makedirs(self.output_video_path, exist_ok=True) - self.target_height = self.eval_cfg.get('target_height', 480) - self.target_width = self.eval_cfg.get('target_width', 832) - self.num_frames = self.eval_cfg.get('num_frames', 81) - self.guidance_scale = self.eval_cfg.get('guidance_scale', 5.0) - self.fps = self.eval_cfg.get('fps', 15) - - @torch.no_grad() - def eval(self, model_llmc, eval_pos): - seed_all(self.config.base.seed + int(os.environ['RANK'])) - model_llmc.Pipeline.to('cuda') - eval_res = self.eval_func( - model_llmc, - self.testenc, - self.eval_dataset_bs, - eval_pos, - ) - - model_llmc.Pipeline.to('cpu') - gc.collect() - torch.cuda.empty_cache() - return eval_res - - def pre_process(self, model, image_path): - image = load_image(image_path) - max_area = self.target_height * self.target_width - aspect_ratio = image.height / image.width - mod_value = model.Pipeline.vae_scale_factor_spatial * model.model.config.patch_size[1] - height = round(np.sqrt(max_area * aspect_ratio)) // mod_value * mod_value - width = round(np.sqrt(max_area / aspect_ratio)) // mod_value * mod_value - image = image.resize((width, height)) - return image, width, height - - @torch.no_grad() - def t2v_eval(self, model, testenc, bs, eval_pos): - assert bs == 1, 'Only support eval bs=1' - - for i, data in enumerate(testenc): - output = model.Pipeline( - prompt=data['prompt'], - negative_prompt=data['negative_prompt'], - height=self.target_height, - width=self.target_width, - num_frames=self.num_frames, - guidance_scale=self.guidance_scale, - ).frames[0] - export_to_video( - output, - os.path.join(self.output_video_path, f'{eval_pos}_output_{i}.mp4'), - fps=self.fps, - ) - - return None - - @torch.no_grad() - def i2v_eval(self, model, testenc, bs, eval_pos): - for i, data in enumerate(testenc): - image, width, height = self.pre_process(model, data['image']) - - output = model.Pipeline( - image=image, - prompt=data['prompt'], - negative_prompt=data['negative_prompt'], - height=height, - width=width, - num_frames=self.num_frames, - guidance_scale=self.guidance_scale, - ).frames[0] - - export_to_video( - output, - os.path.join(self.output_video_path, f'{eval_pos}_output_{i}.mp4'), - fps=self.fps, - ) - - return None - - @torch.no_grad() - def eval_func(self, model, testenc, bs, eval_pos): - assert bs == 1, 'Evaluation only supports batch size = 1.' - assert self.model_type in ['WanT2V', 'WanI2V'], ( - f"Unsupported model type '{self.model_type}'.\n" - 'Only Wan2.1 video generation models (WanT2V, WanI2V) are supported.' 
- ) - if self.eval_dataset_name == 't2v': - return self.t2v_eval(model, testenc, bs, eval_pos) - elif self.eval_dataset_name == 'i2v': - return self.i2v_eval(model, testenc, bs, eval_pos) - else: - raise Exception(f'Unsupported eval dataset: {self.eval_dataset_name}') diff --git a/llmc/eval/eval_vqa.py b/llmc/eval/eval_vqa.py deleted file mode 100755 index b8fd65033..000000000 --- a/llmc/eval/eval_vqa.py +++ /dev/null @@ -1,306 +0,0 @@ -import random -import time -from typing import List, Optional, Union - -import numpy as np -import torch -from lmms_eval.evaluator import evaluate -from lmms_eval.evaluator_utils import run_task_tests -from lmms_eval.loggers.evaluation_tracker import EvaluationTracker -from lmms_eval.tasks import TaskManager, get_task_dict -from lmms_eval.utils import (get_datetime_str, make_table, - simple_parse_args_string) -from loguru import logger - -from llmc.utils.registry_factory import MODEL_REGISTRY - - -class VQAEval: - def __init__(self, config): - self.eval_config = config.eval - self.model_path = config.model.path - self.eval_dataset_name = self.eval_config['name'] - if not isinstance(self.eval_dataset_name, list): - self.eval_dataset_name = [ - self.eval_dataset_name, - ] - self.eval_dataset_path = self.eval_config['path'] - self.eval_bs = self.eval_config['bs'] - - self.statistics = self.eval_config.get('statistics', False) - - def set_statistics_modules(self, model): - - def start_time_hook(module, args, kwargs): - torch.cuda.synchronize() - module.start_time = time.time() - return args, kwargs - - def end_time_hook(module, inputs, kwargs, layer_outputs): - torch.cuda.synchronize() - elapsed_prefill = time.time() - module.start_time - if kwargs['inputs_embeds'] is not None: - module.prefill_count += 1 - module.prefill_time += elapsed_prefill - else: - model.decode_count += 1 - model.decode_time += elapsed_prefill - - model.prefill_count = 0 - model.prefill_time = 0 - model.decode_time = 0 - model.decode_count = 0 - - model.register_forward_pre_hook(start_time_hook, with_kwargs=True) - - model.register_forward_hook(end_time_hook, with_kwargs=True) - - def eval( - self, - llmc_model, - eval_class: Optional[str] = None, - model_args: Optional[Union[str, dict]] = None, - tasks: Optional[List[Union[str, dict, object]]] = None, - num_fewshot: Optional[int] = None, - batch_size: Optional[Union[int, str]] = None, - max_batch_size: Optional[int] = None, - device: Optional[str] = None, - use_cache: Optional[str] = None, - cache_requests: bool = False, - rewrite_requests_cache: bool = False, - delete_requests_cache: bool = False, - limit: Optional[Union[int, float]] = None, - bootstrap_iters: int = 100000, - check_integrity: bool = False, - write_out: bool = False, - log_samples: bool = True, - evaluation_tracker: Optional[EvaluationTracker] = None, - system_instruction: Optional[str] = None, - apply_chat_template: bool = False, - fewshot_as_multiturn: bool = False, - gen_kwargs: Optional[str] = None, - task_manager: Optional[TaskManager] = None, - verbosity: str = 'INFO', - predict_only: bool = False, - random_seed: int = 0, - numpy_random_seed: int = 1234, - torch_random_seed: int = 1234, - fewshot_random_seed: int = 1234, - datetime_str: str = get_datetime_str(), - cli_args=None, - ): - # import argparse - # cli_args = argparse.Namespace( - # process_with_media=True, - # ) - - model = llmc_model.eval_name - model_args = 'pretrained=' + self.model_path + ',device_map=auto' - batch_size = self.eval_bs - tasks = self.eval_dataset_name - num_fewshot = 0 - - 
seed_message = [] - if random_seed is not None: - # See https://github.com/EleutherAI/lm-evaluation-harness/pull/1412 - seed_message.append(f'Setting random seed to {random_seed}') - random.seed(random_seed) - - if numpy_random_seed is not None: - seed_message.append(f'Setting numpy seed to {numpy_random_seed}') - np.random.seed(numpy_random_seed) - - if torch_random_seed is not None: - seed_message.append(f'Setting torch manual seed to {torch_random_seed}') - torch.manual_seed(torch_random_seed) - - if seed_message: - logger.info(' | '.join(seed_message)) - - assert ( - tasks != [] - ), 'No tasks specified, or no tasks found. Please verify the task names.' - - if gen_kwargs: - gen_kwargs = simple_parse_args_string(gen_kwargs) - logger.warning('generation_kwargs specified through cli.') - if gen_kwargs == '': - gen_kwargs = None - - if model_args is None: - model_args = '' - - if task_manager is None: - task_manager = TaskManager(verbosity, model_name=model) - - task_dict = get_task_dict(tasks, task_manager) - - if self.statistics: - self.set_statistics_modules(llmc_model.vlm_model) - torch.cuda.reset_peak_memory_stats() - - lm = MODEL_REGISTRY[model].create_from_arg_string( - model_args, - { - 'llmc_model': llmc_model.vlm_model, - 'batch_size': batch_size, - 'device': device, - }, - ) - # helper function to recursively apply config overrides to leaf subtasks, - # skipping their constituent groups. - # (setting of num_fewshot ; bypassing metric calculation ; setting fewshot seed) - - def _adjust_config(task_dict): - adjusted_task_dict = {} - for task_name, task_obj in task_dict.items(): - if isinstance(task_obj, dict): - adjusted_task_dict = { - **adjusted_task_dict, - **{task_name: _adjust_config(task_obj)}, - } - - else: - task_obj = task_dict[task_name] - if isinstance(task_obj, tuple): - group, task_obj = task_obj - if task_obj is None: - continue - lm.task_dict[task_name] = task_obj.dataset - if 'generate_until' in task_obj.get_config('output_type'): - if gen_kwargs is not None: - task_obj.set_config( - key='generation_kwargs', value=gen_kwargs, update=True - ) - - if predict_only: - logger.info( - f'Processing {task_name} in output-only mode. \ - Metrics will not be calculated!' - ) - # we have to change the class properties post-hoc. This is pretty hacky. - task_obj.override_metric(metric_name='bypass') - - # override tasks' fewshot values to - # the provided num_fewshot arg value - # except if tasks have it set to 0 manually in their configs--then - # we should never overwrite that - if num_fewshot is not None: - if ( - default_num_fewshot := task_obj.get_config('num_fewshot') - ) == 0: - logger.info( - f'num_fewshot has been set to 0 for {task_name} \ - in its config. Manual configuration will be ignored.' - ) - else: - logger.warning( - f'Overwriting default num_fewshot of {task_name} \ - from {default_num_fewshot} to {num_fewshot}' - ) - task_obj.set_config(key='num_fewshot', value=num_fewshot) - else: - # if num_fewshot not provided, and the task does not define a default one, - # default to 0 - if ( - default_num_fewshot := task_obj.get_config('num_fewshot') - ) is None: - task_obj.set_config(key='num_fewshot', value=0) - # fewshot_random_seed set for tasks, even with a default num_fewshot - # (e.g. 
in the YAML file) - task_obj.set_fewshot_seed(seed=fewshot_random_seed) - # logger.info(f"Setting fewshot random generator seed to {fewshot_random_seed}") - - adjusted_task_dict[task_name] = task_obj - - return adjusted_task_dict - - task_dict = _adjust_config(task_dict) - - if check_integrity: - run_task_tests(task_list=tasks) - - if evaluation_tracker is not None: - evaluation_tracker.general_config_tracker.log_experiment_args( - model_source=model, - model_args=model_args, - system_instruction=system_instruction, - chat_template=lm.chat_template if apply_chat_template else None, - fewshot_as_multiturn=fewshot_as_multiturn, - ) - - results = evaluate( - lm=lm, - task_dict=task_dict, - limit=limit, - cache_requests=cache_requests, - rewrite_requests_cache=rewrite_requests_cache, - bootstrap_iters=bootstrap_iters, - write_out=write_out, - log_samples=True if predict_only else log_samples, - system_instruction=system_instruction, - apply_chat_template=apply_chat_template, - fewshot_as_multiturn=fewshot_as_multiturn, - verbosity=verbosity, - cli_args=cli_args, - ) - - if self.statistics: - prefill = ( - llmc_model.vlm_model.prefill_time / llmc_model.vlm_model.prefill_count - ) - decode = ( - llmc_model.vlm_model.decode_time / llmc_model.vlm_model.decode_count - ) - gen_max_mem = torch.cuda.max_memory_allocated() / 1024 / 1024 - - logger.info(f'peak memory: {gen_max_mem:.1f} MB.') - logger.info(f'prefill average time: {prefill *1000:.1f} ms.') - logger.info(f'decode average time: {decode *1000:.1f} ms.') - - if hasattr(lm, '_model'): - del lm._model - torch.cuda.empty_cache() - - if lm.rank == 0: - if isinstance(model, str): - model_name = model - elif hasattr(model, 'config') and hasattr(model.config, '_name_or_path'): - model_name = model.config._name_or_path - else: - model_name = type(model).__name__ - - # add info about the model and few shot config - results['config'] = { - 'model': model_name, - 'model_args': model_args, - } - # add more detailed model info if available TODO: add model info - # if isinstance(lm, lm_eval.models.huggingface.HFLM): - # results["config"].update(lm.get_model_info()) - # add info about execution - results['config'].update( - { - 'batch_size': batch_size, - 'batch_sizes': ( - list(lm.batch_sizes.values()) - if hasattr(lm, 'batch_sizes') - else [] - ), - 'device': device, - 'use_cache': use_cache, - 'limit': limit, - 'bootstrap_iters': bootstrap_iters, - 'gen_kwargs': gen_kwargs, - 'random_seed': random_seed, - 'numpy_seed': numpy_random_seed, - 'torch_seed': torch_random_seed, - 'fewshot_seed': fewshot_random_seed, - } - ) - results['date'] = datetime_str - # add_env_info(results) # additional environment info to results - # add_tokenizer_info(results, lm) # additional info about tokenizer - return '\n' + make_table(results) - else: - return None diff --git a/llmc/eval/utils.py b/llmc/eval/utils.py deleted file mode 100755 index 9e414a574..000000000 --- a/llmc/eval/utils.py +++ /dev/null @@ -1,94 +0,0 @@ -import copy -import os - -from loguru import logger - -from llmc.eval import (AccuracyEval, CustomGenerate, CustomGenerateJustInfer, - DecodePerplexityEval, HumanEval, PerplexityEval, - TokenConsistencyEval, VideoGenerateEval, VQAEval) -from llmc.utils import deploy_all_modality - - -def get_eval_list(model, config): - eval_list = [] - if int(os.environ['RANK']) == 0: - if 'eval' in config: - if 'type' in config.eval and config.eval.type == 'decode_ppl': - if 'pretrain' in config.eval.eval_pos: - raise ValueError( - 'Unsupported: Evaluating decode_ppl with a 
pretrained model. ' - ) - # Pretrained models do not use key-value caching. - # Please use a transformed model to evaluate decode_ppl - # for the original model. - - if not isinstance(config.eval, list): - eval_config_list = [config.eval] - else: - eval_config_list = config.eval - for eval_config in eval_config_list: - config_tmp = copy.deepcopy(config) - config_tmp.eval = eval_config - if 'type' not in config_tmp.eval: - config_tmp.eval['type'] = 'ppl' - if 'eval' in config_tmp and len(config_tmp.eval.eval_pos): - name_list = ( - config_tmp.eval.name - if not isinstance(config_tmp.eval.name, str) - else [config_tmp.eval.name] - ) - for name in name_list: - config_for_eval = copy.deepcopy(config_tmp) - config_for_eval.eval.name = name - if len(name_list) != 1: # eval multi datasets - config_for_eval.eval.path = os.path.join( - config_tmp.eval.path, name - ) - if 'type' not in config_tmp.eval: - config_tmp.eval.type == 'ppl' - if config_tmp.eval.type == 'acc': - eval_class = AccuracyEval(config_for_eval) - elif config_tmp.eval.type == 'vqa': - eval_class = VQAEval(config_for_eval) - elif ( - config_tmp.eval.type == 'code' - and config_tmp.eval.name == 'human_eval' - ): - eval_class = HumanEval(model, config_for_eval) - elif config_tmp.eval.type == 'generate_only': - eval_class = CustomGenerate(model, config_for_eval) - elif config_tmp.eval.type == 'just_infer': - eval_class = CustomGenerateJustInfer(model, config_for_eval) - elif config_tmp.eval.type == 'token_acc': - eval_class = TokenConsistencyEval(model, config_for_eval) - elif config_tmp.eval.type == 'ppl': - eval_class = PerplexityEval(model, config_for_eval) - elif config_tmp.eval.type == 'decode_ppl': - eval_class = DecodePerplexityEval(model, config_for_eval) - elif config_tmp.eval.type == 'video_gen': - eval_class = VideoGenerateEval(model, config_for_eval) - else: - raise ValueError( - f'Unsupported eval type: {config_tmp.eval.type}' - ) - eval_list.append((eval_class, config_for_eval)) - return eval_list - - -def eval_model(model, blockwise_opts, eval_list, eval_pos): - if int(os.environ['RANK']) == 0: - do_eval = False - for _, config_for_eval in eval_list: - if eval_pos in config_for_eval.eval.eval_pos: - do_eval = True - if do_eval: - if eval_pos == 'transformed': - deploy_all_modality(blockwise_opts, 'origin_float') - elif eval_pos in ['fake_quant', 'fake_quant_wo_kv']: - deploy_all_modality(blockwise_opts, 'fake_quant') - for eval_class, config_for_eval in eval_list: - if eval_pos in config_for_eval.eval.eval_pos: - res = eval_class.eval(model, eval_pos) - eval_name = config_for_eval.eval.type - dataset_name = config_for_eval.eval.name - logger.info(f'EVAL: {eval_name} on {dataset_name} is {res}') diff --git a/llmc/models/__init__.py b/llmc/models/__init__.py deleted file mode 100755 index 48586cf25..000000000 --- a/llmc/models/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -from .bloom import Bloom -from .chatglm import ChatGLM -from .deepseekv2 import DeepseekV2 -from .deepseekv3 import DeepseekV3 -from .falcon import Falcon -from .gemma2 import Gemma2 -from .glm4v import GLM4V -from .internlm2 import InternLM2 -from .internomni import InternOmni -from .internvl2 import InternVL2 -from .llama import Llama -from .llava import Llava -from .llava_hf import LlavaHf -from .llava_onevision import Llava_OneVision -from .minicpm import MiniCPM -from .minicpmv import MiniCPMV -from .mistral import Mistral -from .mixtral import Mixtral -from .mllama import Mllama -from .opt import Opt -from .phi import Phi -from .phi3 import Phi3 -from 
.qwen import Qwen -from .qwen2 import Qwen2 -from .qwen2_5vl import Qwen2_5VL -from .qwen2audio import Qwen2Audio -from .qwen2moe import Qwen2Moe -from .qwen2vl import Qwen2VL -from .smollm import SmolLM -from .stablelm import StableLm -from .starcoder import Starcoder -from .videollava import VideoLLaVA -from .vila import Vila -from .vit import Vit -from .wan_i2v import WanI2V -from .wan_t2v import WanT2V diff --git a/llmc/models/base_model.py b/llmc/models/base_model.py deleted file mode 100755 index 6f6a563b2..000000000 --- a/llmc/models/base_model.py +++ /dev/null @@ -1,496 +0,0 @@ -import gc -import inspect -import json -import os -from abc import ABCMeta, abstractmethod -from collections import defaultdict - -import torch -import torch.nn as nn -from accelerate import init_empty_weights -from loguru import logger -from safetensors import safe_open -from torch.nn import functional as F -from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer - -from llmc.compression.quantization.module_utils import ( - _LLMC_LINEAR_TYPES_, _LLMC_LN_TYPES_, _TRANSFORMERS_LINEAR_TYPES_, - _TRANSFORMERS_LN_TYPES_, LlmcFp8Linear, VllmQuantLinearFp8, - VllmQuantLinearInt8) - - -class BaseModel(metaclass=ABCMeta): - def __init__(self, config, device_map=None, use_cache=False): - self.config = config - self.model_type = self.config.model.type - self.model_path = self.config.model.path - self.tokenizer_mode = self.config.model.get('tokenizer_mode', 'fast') - self.use_cpu_to_save_cuda_mem_for_catcher = self.config.model.get('use_cpu_to_save_cuda_mem_for_catcher', False) # noqa - torch_dtype = self.config.model.torch_dtype - self.torch_dtype = torch_dtype if torch_dtype in ['auto'] else eval(torch_dtype) - self.block_wise_quant = self.config.model.get('block_wise_quant', False) - if self.block_wise_quant: - assert self.torch_dtype == torch.float8_e4m3fn - self.device_map = device_map - self.use_cache = use_cache - self.mm_model = None - self.vision_model = None - self.vision_projector = None - self.audio_model = None - self.audio_projector = None - self.modality = 'language' - self.kvcache_buffer = [] - self.build_tokenizer() - self.build_model() - try: - self.model.eval() - except: # noqa - pass - self.update_key_info() - if self.mm_model: - self.mm_model.eval() - - def set_modality(self, modality='language'): - assert modality in ['audio', 'vision', 'language', 'video_gen'] - self.modality = modality - self.update_key_info() - - def get_modality(self): - assert self.modality in ['audio', 'vision', 'language'] - return self.modality - - def update_key_info(self): - self.find_blocks() - self.find_embed_layers() - self.find_block_name() - self.add_layernorms_class() - - def reset_kv(self): - for kvcache in self.kvcache_buffer: - kvcache._reset_states() - - @abstractmethod - def find_blocks(self): - pass - - def find_block_name(self): - pass - - def get_model(self): - return self.model - - def get_extra_rot_module_besides_embed_layers(self): - return [] - - def get_blocks(self): - return self.blocks - - @abstractmethod - def find_embed_layers(self): - pass - - @abstractmethod - def get_embed_layers(self): - pass - - @abstractmethod - def get_layers_except_blocks(self): - pass - - def get_matmul_in_block(self): - return {} - - def get_act_fn_in_block(self): - return {} - - def get_softmax_in_block(self): - return {} - - @abstractmethod - def get_subsets_in_block(self, block): - pass - - @abstractmethod - def skip_layer_name(self): - pass - - @abstractmethod - def has_bias(self): - pass - - def 
build_tokenizer(self): - if self.model_type not in ['Vit', 'WanT2V', 'WanI2V']: - assert self.tokenizer_mode in ['fast', 'slow'] - self.tokenizer = AutoTokenizer.from_pretrained( - self.model_path, use_fast=self.tokenizer_mode, trust_remote_code=True - ) - if 'Intern' in self.model_type: - self.tokenizer.padding_side = 'left' - if self.tokenizer.pad_token is None: - self.tokenizer.pad_token = self.tokenizer.eos_token - else: - self.tokenizer = None - - def get_tokenizer(self): - return self.tokenizer - - def get_attention_rotary_layers(self): - return [] - - def get_num_attention_heads(self): - return self.model_config.num_attention_heads - - def apply_chat_template(self, prompt): - messages = [ - {'role': 'system', 'content': 'You are a helpful assistant.'}, - {'role': 'user', 'content': prompt} - ] - text = self.tokenizer.apply_chat_template( - messages, - tokenize=False, - add_generation_prompt=True - ) - return text - - def batch_process(self, samples, calib_or_eval='eval', apply_chat_template=False, return_inputs=True): # noqa - assert calib_or_eval == 'calib' or calib_or_eval == 'eval' - texts = [] - for idx in range(len(samples)): - question = samples[idx]['question'] - if apply_chat_template: - question = self.apply_chat_template(question) - texts.append(question) - if not return_inputs: - return texts - model_inputs = self.tokenizer(texts, return_tensors='pt', padding=True) - input_ids = model_inputs['input_ids'] - attention_mask = model_inputs['attention_mask'] - inputs = { - 'input_ids': input_ids, - 'attention_mask': attention_mask, - } - return inputs - - def get_catcher(self, first_block_input): - class Catcher(nn.Module): - def __init__(self, module): - super().__init__() - self.module = module - self.signature = inspect.signature(module.forward) - - def forward(self, *args, **kwargs): - params = list(self.signature.parameters.keys()) - for i, arg in enumerate(args): - if i > 0: - kwargs[params[i]] = arg - first_block_input['data'].append(args[0]) - if 'output_router_logits' in kwargs: - assert kwargs['output_router_logits'] is False - kwargs.pop('output_router_logits') - first_block_input['kwargs'].append(kwargs) - raise ValueError - return Catcher - - def __str__(self): - return f'\nConfig: \n{str(self.model_config)} \nModel: \n{str(self.model)}' - - def build_model(self): - self.model_config = AutoConfig.from_pretrained( - self.model_path, trust_remote_code=True - ) - if not self.use_cache: - if hasattr(self.model_config, 'use_cache'): - self.model_config.use_cache = False - logger.info(f'self.model_config : {self.model_config}') - if self.torch_dtype in [torch.float8_e4m3fn, torch.int8]: - with init_empty_weights(): - self.model = AutoModelForCausalLM.from_config(config=self.model_config, - torch_dtype=torch.float16, - trust_remote_code=True) - self.find_blocks() - if self.torch_dtype == torch.float8_e4m3fn: - if self.block_wise_quant: - self.fp8_block_size \ - = self.model_config.quantization_config['weight_block_size'][0] - params_dict = {'block_size': self.fp8_block_size} - quant_linear_cls = LlmcFp8Linear - else: - params_dict = {} - quant_linear_cls = VllmQuantLinearFp8 - elif self.torch_dtype == torch.int8: - params_dict = {} - quant_linear_cls = VllmQuantLinearInt8 - - for block_idx, block in enumerate(self.blocks): - self.replace_module_block(quant_linear_cls, - block, - block_idx, - params_dict) - - self.load_quant_weight() - - logger.info(f'fp8 block size: {self.fp8_block_size}') - else: - self.model = AutoModelForCausalLM.from_pretrained( - self.model_path, - 
config=self.model_config, - device_map=self.device_map, - trust_remote_code=True, - torch_dtype=self.torch_dtype, - low_cpu_mem_usage=True, - ) - logger.info(f'self.model : {self.model}') - - def load_quant_weight(self): - state_dict = self.model.state_dict() - model_index_file = os.path.join(self.model_path, 'model.safetensors.index.json') - - with open(model_index_file, 'r') as f: - model_index = json.load(f) - weight_map = model_index['weight_map'] - - shard_to_tensors = defaultdict(list) - for weight_name in state_dict: - shard_path = weight_map[weight_name] - shard_to_tensors[shard_path].append(weight_name) - - for shard_path, tensor_names in shard_to_tensors.items(): - full_shard_path = os.path.join(self.model_path, shard_path) - logger.info(f'Loading Quant shard: {full_shard_path}') - with safe_open(full_shard_path, framework='pt', device='cpu') as f: - for weight_name in tensor_names: - tensor = f.get_tensor(weight_name) - state_dict[weight_name] = tensor - self.model.load_state_dict(state_dict, assign=True) - - def add_layernorms_class(self): - ln_class_list = [] - single_block = self.blocks[0] - ln_dict = self.get_layernorms_in_block(single_block) - for ln_name in ln_dict: - ln_class = ln_dict[ln_name].__class__ - if ln_class not in ln_class_list: - ln_class_list.append(ln_class) - for ln_class in ln_class_list: - if ln_class not in _TRANSFORMERS_LN_TYPES_: - _TRANSFORMERS_LN_TYPES_.append(ln_class) - logger.info(f'_TRANSFORMERS_LN_TYPES_ : {_TRANSFORMERS_LN_TYPES_}') - - @torch.no_grad() - def collect_first_block_input(self, calib_data, padding_mask=None): - first_block_input = defaultdict(list) - - Catcher = self.get_catcher(first_block_input) - - if not self.use_cpu_to_save_cuda_mem_for_catcher: - self.move_embed_to_device('cuda') - if self.vision_model: - self.vision_model.cuda() - if self.vision_projector: - self.vision_projector.cuda() - if self.audio_model: - self.audio_model.cuda() - if self.audio_projector: - self.audio_projector.cuda() - self.blocks[0] = self.blocks[0].cuda() - self.blocks[0] = Catcher(self.blocks[0]) - - for data in calib_data: - data = { - k: (v.cuda() if torch.is_tensor(v) else v) - for k, v in data.items() - } - try: - if not self.mm_model: - self.model(**data) - else: - self.mm_model.generate(**data, max_new_tokens=128, do_sample=False) - except ValueError: - pass - self.first_block_input = first_block_input - assert len(self.first_block_input) > 0, 'Catch input data failed.' 
- if padding_mask: - for idx in range(len(self.first_block_input['data'])): - token_num = self.first_block_input['data'][idx].shape[1] - if token_num != padding_mask[idx].shape[1]: - padding_mask[idx] = F.pad( - padding_mask[idx], - self.get_one_pad_setting( - self.tokenizer.padding_side, - token_num - padding_mask[idx].shape[1] - ), - value=1 - ) - self.padding_mask = padding_mask - if not self.use_cpu_to_save_cuda_mem_for_catcher: - if self.vision_model: - self.vision_model.cpu() - if self.vision_projector: - self.vision_projector.cpu() - if self.audio_model: - self.audio_model.cpu() - if self.audio_projector: - self.audio_projector.cpu() - self.blocks[0] = self.blocks[0].cpu() - self.move_embed_to_device('cpu') - self.blocks[0] = self.blocks[0].module - - def get_one_pad_setting(self, padding_side, length): - if padding_side == 'left': - return [0, length] - elif padding_side == 'right': - return [length, 0] - else: - raise Exception(f'Not support padding_side: {padding_side}.') - - def get_first_block_input(self): - return self.first_block_input - - def get_padding_mask(self): - return self.padding_mask - - def get_model_config(self): - return self.model_config - - def move_embed_to_device(self, device): - for embed_layer in self.get_embed_layers(): - embed_layer.to(device) - for attention_rotary_layer in self.get_attention_rotary_layers(): - attention_rotary_layer.to(device) - - def get_block_linears(self, block): - return { - name: m - for name, m in block.named_modules() - if isinstance(m, tuple(_LLMC_LINEAR_TYPES_ + _TRANSFORMERS_LINEAR_TYPES_)) - } - - def get_all_linears(self, module): - return { - name: m - for name, m in module.named_modules() - if isinstance(m, tuple(_LLMC_LINEAR_TYPES_ + _TRANSFORMERS_LINEAR_TYPES_)) - } - - def get_extra_modules(self, block): - return {} - - def get_moe_gate(self, block): - return None - - def replace_vision_module_all(self, module, params_dict, keep_device=False): - vision_model_linears = self.get_block_linears(self.vision_model) - for name, m in vision_model_linears.items(): - M = module.new(m, **params_dict) - - name_tmp = name.rsplit('.', 1) - if len(name_tmp) == 2: - parent_name = name_tmp[0] - parent = self.vision_model.get_submodule(parent_name) - child_name = name_tmp[1] - elif len(name_tmp) == 1: - parent = self.vision_model - child_name = name_tmp[0] - - setattr(parent, child_name, M) - - gc.collect() - torch.cuda.empty_cache() - logger.info(f'The Replaced vision_model: {self.vision_model}') - - def replace_language_module_all(self, module, params_dict, keep_device=False): - for block_idx in range(len(self.blocks)): - logger.info(f'Replace block index: {block_idx}/{len(self.blocks)}') - if keep_device: - self.replace_module_block(module, self.blocks[block_idx], block_idx, params_dict) - else: - self.blocks[block_idx].cuda() - self.replace_module_block(module, self.blocks[block_idx], block_idx, params_dict) - self.blocks[block_idx].cpu() - gc.collect() - torch.cuda.empty_cache() - logger.info(f'The Replaced model: {self.model}') - - def replace_video_gen_module_all(self, module, params_dict, keep_device=False): - for block_idx in range(len(self.blocks)): - logger.info(f'Replace block index: {block_idx}/{len(self.blocks)}') - if keep_device: - self.replace_module_block(module, self.blocks[block_idx], block_idx, params_dict) - else: - self.blocks[block_idx].cuda() - self.replace_module_block(module, self.blocks[block_idx], block_idx, params_dict) - self.blocks[block_idx].cpu() - gc.collect() - torch.cuda.empty_cache() - 
logger.info(f'The Replaced model: {self.model}') - - def replace_module_block(self, module, block, block_idx, params_dict): - if module in _LLMC_LN_TYPES_ + _TRANSFORMERS_LN_TYPES_: - self.replace_module_layernorm( - module, block, self.get_layernorms_in_block(block), block_idx, params_dict - ) - else: - self.replace_module_subset(module, - block, - {'layers': self.get_block_linears(block)}, - block_idx, - params_dict) - - def replace_module_subset(self, module, block, subset, block_idx, params_dict): - if module in _LLMC_LINEAR_TYPES_ + _TRANSFORMERS_LINEAR_TYPES_: - layers_dict = { - name: layer for name, layer in subset['layers'].items() - if isinstance(layer, tuple(_LLMC_LINEAR_TYPES_ + _TRANSFORMERS_LINEAR_TYPES_)) - } - else: - layers_dict = subset['layers'] - - for name, m in layers_dict.items(): - if hasattr(m, 'no_quant') and m.no_quant: - continue - - M = module.new(m, **params_dict) - - name_tmp = name.rsplit('.', 1) - if len(name_tmp) == 2: - parent_name = name_tmp[0] - parent = block.get_submodule(parent_name) - child_name = name_tmp[1] - elif len(name_tmp) == 1: - parent = block - child_name = name_tmp[0] - - setattr(parent, child_name, M) - del M - - logger.info(f'replace >>> {name} in {block_idx}-th block') - - del layers_dict - gc.collect() - torch.cuda.empty_cache() - - def replace_module_layernorm(self, module, block, lns, i, params_dict): - for name, m in lns.items(): - if isinstance(m, module): - continue - M = module.new(m, **params_dict) - - name_tmp = name.rsplit('.', 1) - if len(name_tmp) == 2: - parent_name = name_tmp[0] - parent = block.get_submodule(parent_name) - child_name = name_tmp[1] - elif len(name_tmp) == 1: - parent = block - child_name = name_tmp[0] - - setattr(parent, child_name, M) - del M - - del lns - gc.collect() - torch.cuda.empty_cache() - - def convert_dtype(self, dtype='torch.float16'): - for i in range(len(self.blocks)): - self.blocks[i] = self.blocks[i].to(dtype) diff --git a/llmc/models/bloom.py b/llmc/models/bloom.py deleted file mode 100644 index 50d2c2ef8..000000000 --- a/llmc/models/bloom.py +++ /dev/null @@ -1,80 +0,0 @@ -from llmc.utils.registry_factory import MODEL_REGISTRY - -from .base_model import BaseModel - - -@MODEL_REGISTRY -class Bloom(BaseModel): - def __init__(self, config, device_map=None, use_cache=False): - super().__init__(config, device_map, use_cache) - - def find_blocks(self): - self.blocks = self.model.transformer.h - - def find_embed_layers(self): - self.word_embeddings = self.model.transformer.word_embeddings - self.word_embeddings_layernorm = ( - self.model.transformer.word_embeddings_layernorm - ) - - def find_block_name(self): - self.block_name_prefix = 'model.transformer.h' - - def get_embed_layers(self): - return [self.word_embeddings, self.word_embeddings_layernorm] - - def get_layers_except_blocks(self): - return [ - self.word_embeddings, - self.word_embeddings_layernorm, - self.model.lm_head, - self.model.transformer.ln_f, - ] - - def skip_layer_name(self): - return ['lm_head'] - - def has_bias(self): - return True - - def get_layernorms_in_block(self, block): - return { - 'input_layernorm': block.input_layernorm, - 'post_attention_layernorm': block.post_attention_layernorm, - } - - def get_subsets_in_block(self, block): - return [ - { - 'layers': { - 'self_attention.query_key_value': ( - block.self_attention.query_key_value - ) - }, - 'prev_op': [block.input_layernorm], - 'input': ['self_attention.query_key_value'], - 'inspect': block.self_attention.query_key_value, - 'has_kwargs': False, - }, - { - 
'layers': {'self_attention.dense': block.self_attention.dense}, - 'prev_op': [block.self_attention.query_key_value], - 'input': ['self_attention.dense'], - 'inspect': block.self_attention.dense, - 'has_kwargs': False, - }, - { - 'layers': {'mlp.dense_h_to_4h': block.mlp.dense_h_to_4h}, - 'prev_op': [block.post_attention_layernorm], - 'input': ['mlp.dense_h_to_4h'], - 'inspect': block.mlp.dense_h_to_4h, - 'has_kwargs': False, - }, - { - 'layers': {'mlp.dense_4h_to_h': block.mlp.dense_4h_to_h}, - 'prev_op': [block.mlp.gelu_impl], - 'input': ['mlp.dense_4h_to_h'], - 'inspect': block.mlp.dense_4h_to_h, - 'has_kwargs': False, - }, - ] diff --git a/llmc/models/chatglm.py b/llmc/models/chatglm.py deleted file mode 100644 index 5b61e1270..000000000 --- a/llmc/models/chatglm.py +++ /dev/null @@ -1,88 +0,0 @@ -import inspect - -import torch.nn as nn - -from llmc.utils.registry_factory import MODEL_REGISTRY - -from .base_model import BaseModel - - -@MODEL_REGISTRY -class ChatGLM(BaseModel): - def __init__(self, config, device_map=None, use_cache=False): - super().__init__(config, device_map, use_cache) - - def find_blocks(self): - self.blocks = self.model.transformer.encoder.layers - - def find_embed_layers(self): - self.embedding = self.model.transformer.embedding - self.rotary_pos_emb = self.model.transformer.rotary_pos_emb - - def find_block_name(self): - self.block_name_prefix = 'transformer.encoder.layers' - - def get_embed_layers(self): - return [self.embedding] - - def get_attention_rotary_layers(self): - return [self.rotary_pos_emb] - - def get_head_layers(self): - return [self.model.transformer.output_layer] - - def get_pre_head_layernorm_layers(self): - return [self.model.transformer.encoder.final_layernorm] - - def get_layers_except_blocks(self): - return [self.embedding, self.rotary_pos_emb, self.model.transformer.output_layer, self.model.transformer.encoder.final_layernorm] # noqa - - def skip_layer_name(self): - return ['final_layernorm'] - - def has_bias(self): - return False - - def get_layernorms_in_block(self, block): - return { - 'input_layernorm': block.input_layernorm, - 'post_attention_layernorm': block.post_attention_layernorm, - } - - def get_subsets_in_block(self, block): - return [ - { - 'layers': { - 'self_attention.query_key_value': block.self_attention.query_key_value - }, - 'prev_op': [block.input_layernorm], - 'input': ['self_attention.query_key_value'], - 'inspect': block.self_attention, - 'has_kwargs': True, - }, - { - 'layers': {'self_attention.dense': block.self_attention.dense}, - 'prev_op': [block.self_attention.query_key_value], - 'input': ['self_attention.dense'], - 'inspect': block.self_attention.dense, - 'has_kwargs': False, - }, - { - 'layers': { - 'mlp.dense_h_to_4h': block.mlp.dense_h_to_4h - }, - 'prev_op': [block.post_attention_layernorm], - 'input': ['mlp.dense_h_to_4h'], - 'inspect': block.mlp, - 'has_kwargs': False, - 'is_mlp': True, - }, - { - 'layers': {'mlp.down_proj': block.mlp.dense_4h_to_h}, - 'prev_op': [block.mlp.dense_h_to_4h], - 'input': ['mlp.dense_4h_to_h'], - 'inspect': block.mlp.dense_4h_to_h, - 'has_kwargs': False, - 'is_mlp': True, - }, - ] diff --git a/llmc/models/deepseekv2.py b/llmc/models/deepseekv2.py deleted file mode 100644 index 09ee2eefe..000000000 --- a/llmc/models/deepseekv2.py +++ /dev/null @@ -1,190 +0,0 @@ -from llmc.utils.registry_factory import MODEL_REGISTRY - -from .base_model import BaseModel - - -@MODEL_REGISTRY -class DeepseekV2(BaseModel): - def __init__(self, config, device_map=None, use_cache=False): - 
super().__init__(config, device_map, use_cache) - - def find_blocks(self): - self.blocks = self.model.model.layers - - def find_embed_layers(self): - self.embed_tokens = self.model.model.embed_tokens - - def find_block_name(self): - self.block_name_prefix = 'model.layers' - - def get_embed_layers(self): - return [self.embed_tokens] - - def get_layers_except_blocks(self): - return [self.embed_tokens, self.model.model.norm, self.model.lm_head] - - def get_extra_modules(self, block): - return { - 'mlp': block.mlp - } - - def skip_layer_name(self): - return ['lm_head'] - - def has_bias(self): - return False - - def get_layernorms_in_block(self, block): - return { - 'input_layernorm': block.input_layernorm, - 'post_attention_layernorm': block.post_attention_layernorm, - } - - def get_attn_in_block(self, block): - return {'self_attn': block.self_attn} - - def get_matmul_in_block(self, block): - return { - 'self_attn.matmul_1': block.self_attn.matmul_1, - 'self_attn.matmul_2': block.self_attn.matmul_2, - } - - def get_softmax_in_block(self, block): - return {'self_attn.softmax': block.self_attn.softmax} - - def get_head_layers(self): - return [self.model.lm_head] - - def get_pre_head_layernorm_layers(self): - return [self.model.model.norm] - - def get_moe_gate(self, block): - if hasattr(block.mlp, 'gate'): - return {'mlp.gate': block.mlp.gate} - else: - return None - - def get_subsets_in_block(self, block): - layers = [] - if hasattr(block.self_attn, 'q_proj'): - layers.append( - { - 'layers': { - 'self_attn.q_proj': block.self_attn.q_proj, # noqa - 'self_attn.kv_a_proj_with_mqa': block.self_attn.kv_a_proj_with_mqa, # noqa - }, - 'prev_op': [block.input_layernorm], - 'input': ['self_attn.q_proj'], - 'inspect': block.self_attn, - 'has_kwargs': True, - } - ) - else: - layers.append( - { - 'layers': { - 'self_attn.q_a_proj': block.self_attn.q_a_proj, - 'self_attn.kv_a_proj_with_mqa': block.self_attn.kv_a_proj_with_mqa, # noqa - }, - 'prev_op': [block.input_layernorm], - 'input': ['self_attn.q_a_proj'], - 'inspect': block.self_attn, - 'has_kwargs': True, - } - ) - layers.append( - { - 'layers': {'self_attn.q_b_proj': block.self_attn.q_b_proj}, - 'prev_op': [block.self_attn.q_a_layernorm], - 'input': ['self_attn.q_b_proj'], - 'inspect': block.self_attn.q_b_proj, - 'has_kwargs': False, - 'skip_rotate': True, - } - ) - - layers.append( - { - 'layers': {'self_attn.o_proj': block.self_attn.o_proj}, - 'prev_op': [None], - 'input': ['self_attn.o_proj'], - 'inspect': block.self_attn.o_proj, - 'has_kwargs': False, - }, - ) - layers.append( - { - 'layers': {'self_attn.kv_b_proj': block.self_attn.kv_b_proj}, - 'prev_op': [block.self_attn.kv_a_layernorm], - 'input': ['self_attn.kv_b_proj'], - 'inspect': block.self_attn.kv_b_proj, - 'has_kwargs': False, - 'skip_rotate': True - } - ) - - if hasattr(block.mlp, 'gate'): - layers.append( - { - 'layers': { - **{f'mlp.experts.{i}.gate_proj': block.mlp.experts[i].gate_proj - for i in range(len(block.mlp.experts))}, - **{f'mlp.experts.{i}.up_proj': block.mlp.experts[i].up_proj - for i in range(len(block.mlp.experts))}, - 'mlp.shared_experts.gate_proj': block.mlp.shared_experts.gate_proj, # noqa - 'mlp.shared_experts.up_proj': block.mlp.shared_experts.up_proj, - 'mlp.gate': block.mlp.gate, - }, - 'prev_op': [block.post_attention_layernorm], - 'input': ['mlp'], - 'inspect': block.mlp, - 'has_kwargs': False, - 'is_mlp': True, - } - ) - for i in range(len(block.mlp.experts)): - layers.append( - { - 'layers': {f'mlp.experts.{i}.down_proj': block.mlp.experts[i].down_proj}, # 
noqa - 'prev_op': [block.mlp.experts[i].up_proj], - 'input': [f'mlp.experts.{i}.down_proj'], - 'inspect': block.mlp.experts[i].down_proj, - 'has_kwargs': False, - 'is_mlp': True, - } - ) - - layers.append( - { - 'layers': {'mlp.shared_experts.down_proj': block.mlp.shared_experts.down_proj}, # noqa - 'prev_op': [block.mlp.shared_experts.up_proj], - 'input': ['mlp.shared_experts.down_proj'], - 'inspect': block.mlp.shared_experts.down_proj, - 'has_kwargs': False, - } - ) - else: - layers.append( - { - 'layers': { - 'mlp.gate_proj': block.mlp.gate_proj, - 'mlp.up_proj': block.mlp.up_proj, - }, - 'prev_op': [block.post_attention_layernorm], - 'input': ['mlp.gate_proj'], - 'inspect': block.mlp, - 'has_kwargs': False, - } - ) - - layers.append( - { - 'layers': {'mlp.down_proj': block.mlp.down_proj}, - 'prev_op': [block.mlp.up_proj], - 'input': ['mlp.down_proj'], - 'inspect': block.mlp.down_proj, - 'has_kwargs': False, - } - ) - - return layers diff --git a/llmc/models/deepseekv3.py b/llmc/models/deepseekv3.py deleted file mode 100755 index 457fa36ed..000000000 --- a/llmc/models/deepseekv3.py +++ /dev/null @@ -1,192 +0,0 @@ -from collections import defaultdict - -from llmc.utils.registry_factory import MODEL_REGISTRY - -from .base_model import BaseModel - - -@MODEL_REGISTRY -class DeepseekV3(BaseModel): - def __init__(self, config, device_map=None, use_cache=False): - super().__init__(config, device_map, use_cache) - - def find_blocks(self): - self.blocks = self.model.model.layers - - def find_embed_layers(self): - self.embed_tokens = self.model.model.embed_tokens - - def find_block_name(self): - self.block_name_prefix = 'model.layers' - - def get_embed_layers(self): - return [self.embed_tokens] - - def get_layers_except_blocks(self): - return [self.embed_tokens, self.model.model.norm, self.model.lm_head] - - def get_extra_modules(self, block): - return { - 'mlp': block.mlp - } - - def skip_layer_name(self): - return ['lm_head'] - - def has_bias(self): - return False - - def get_layernorms_in_block(self, block): - return { - 'input_layernorm': block.input_layernorm, - 'post_attention_layernorm': block.post_attention_layernorm, - } - - def get_attn_in_block(self, block): - return {'self_attn': block.self_attn} - - def get_matmul_in_block(self, block): - return { - 'self_attn.matmul_1': block.self_attn.matmul_1, - 'self_attn.matmul_2': block.self_attn.matmul_2, - } - - def get_softmax_in_block(self, block): - return {'self_attn.softmax': block.self_attn.softmax} - - def get_head_layers(self): - return [self.model.lm_head] - - def get_pre_head_layernorm_layers(self): - return [self.model.model.norm] - - def get_moe_gate(self, block): - if hasattr(block.mlp, 'gate'): - return {'mlp.gate': block.mlp.gate} - else: - return None - - def get_subsets_in_block(self, block): - layers = [] - if hasattr(block.self_attn, 'q_proj'): - layers.append( - { - 'layers': { - 'self_attn.q_proj': block.self_attn.q_proj, # noqa - 'self_attn.kv_a_proj_with_mqa': block.self_attn.kv_a_proj_with_mqa, # noqa - }, - 'prev_op': [block.input_layernorm], - 'input': ['self_attn.q_proj'], - 'inspect': block.self_attn, - 'has_kwargs': True, - } - ) - else: - layers.append( - { - 'layers': { - 'self_attn.q_a_proj': block.self_attn.q_a_proj, - 'self_attn.kv_a_proj_with_mqa': block.self_attn.kv_a_proj_with_mqa, # noqa - }, - 'prev_op': [block.input_layernorm], - 'input': ['self_attn.q_a_proj'], - 'inspect': block.self_attn, - 'has_kwargs': True, - } - ) - layers.append( - { - 'layers': {'self_attn.q_b_proj': block.self_attn.q_b_proj}, 
- 'prev_op': [block.self_attn.q_a_layernorm], - 'input': ['self_attn.q_b_proj'], - 'inspect': block.self_attn.q_b_proj, - 'has_kwargs': False, - 'skip_rotate': True, - } - ) - - layers.append( - { - 'layers': {'self_attn.o_proj': block.self_attn.o_proj}, - 'prev_op': [None], - 'input': ['self_attn.o_proj'], - 'inspect': block.self_attn.o_proj, - 'has_kwargs': False, - }, - ) - layers.append( - { - 'layers': {'self_attn.kv_b_proj': block.self_attn.kv_b_proj}, - 'prev_op': [block.self_attn.kv_a_layernorm], - 'input': ['self_attn.kv_b_proj'], - 'inspect': block.self_attn.kv_b_proj, - 'has_kwargs': False, - 'skip_rotate': True - } - ) - - if hasattr(block.mlp, 'gate'): - layers.append( - { - 'layers': { - **{f'mlp.experts.{i}.gate_proj': block.mlp.experts[i].gate_proj - for i in range(len(block.mlp.experts))}, - **{f'mlp.experts.{i}.up_proj': block.mlp.experts[i].up_proj - for i in range(len(block.mlp.experts))}, - 'mlp.shared_experts.gate_proj': block.mlp.shared_experts.gate_proj, # noqa - 'mlp.shared_experts.up_proj': block.mlp.shared_experts.up_proj, - 'mlp.gate': block.mlp.gate, - }, - 'prev_op': [block.post_attention_layernorm], - 'input': ['mlp'], - 'inspect': block.mlp, - 'has_kwargs': False, - 'is_mlp': True, - } - ) - for i in range(len(block.mlp.experts)): - layers.append( - { - 'layers': {f'mlp.experts.{i}.down_proj': block.mlp.experts[i].down_proj}, # noqa - 'prev_op': [block.mlp.experts[i].up_proj], - 'input': [f'mlp.experts.{i}.down_proj'], - 'inspect': block.mlp.experts[i].down_proj, - 'has_kwargs': False, - 'is_mlp': True, - } - ) - - layers.append( - { - 'layers': {'mlp.shared_experts.down_proj': block.mlp.shared_experts.down_proj}, # noqa - 'prev_op': [block.mlp.shared_experts.up_proj], - 'input': ['mlp.shared_experts.down_proj'], - 'inspect': block.mlp.shared_experts.down_proj, - 'has_kwargs': False, - } - ) - else: - layers.append( - { - 'layers': { - 'mlp.gate_proj': block.mlp.gate_proj, - 'mlp.up_proj': block.mlp.up_proj, - }, - 'prev_op': [block.post_attention_layernorm], - 'input': ['mlp.gate_proj'], - 'inspect': block.mlp, - 'has_kwargs': False, - } - ) - - layers.append( - { - 'layers': {'mlp.down_proj': block.mlp.down_proj}, - 'prev_op': [block.mlp.up_proj], - 'input': ['mlp.down_proj'], - 'inspect': block.mlp.down_proj, - 'has_kwargs': False, - } - ) - - return layers diff --git a/llmc/models/falcon.py b/llmc/models/falcon.py deleted file mode 100644 index e0823dc22..000000000 --- a/llmc/models/falcon.py +++ /dev/null @@ -1,113 +0,0 @@ -from llmc.utils.registry_factory import MODEL_REGISTRY - -from .base_model import BaseModel - - -@MODEL_REGISTRY -class Falcon(BaseModel): - def __init__(self, config, device_map=None, use_cache=False): - super().__init__(config, device_map, use_cache) - - def find_blocks(self): - self.blocks = self.model.transformer.h - - def find_embed_layers(self): - self.word_embeddings = self.model.transformer.word_embeddings - self.rotary_emb = self.model.model.rotary_emb - - def find_block_name(self): - self.block_name_prefix = 'model.transformer.h' - - def get_embed_layers(self): - return [self.word_embeddings] - - def get_attention_rotary_layers(self): - return [self.rotary_emb] - - def get_layers_except_blocks(self): - return [self.word_embeddings, self.rotary_emb, self.model.transformer.ln_f] - - def has_bias(self): - return False - - def get_layernorms_in_block(self, block): - if block.config.architectures[0] == 'RWForCausalLM': - new_decoder_architecture = False - elif block.config.architectures[0] == 'FalconForCausalLM': - 
new_decoder_architecture = True - if new_decoder_architecture: - return {'ln_attn': block.ln_attn, 'ln_mlp': block.ln_mlp} - else: - if block.config.parallel_attn: - return {'input_layernorm': block.input_layernorm} - else: - return {'post_attention_layernorm': block.post_attention_layernorm} - - def get_subsets_in_block(self, block): - if block.config.architectures[0] == 'RWForCausalLM': - new_decoder_architecture = False - elif block.config.architectures[0] == 'FalconForCausalLM': - new_decoder_architecture = True - if new_decoder_architecture: - subset1 = { - 'layers': { - 'self_attention.query_key_value': ( - block.self_attention.query_key_value - ) - }, - 'prev_op': [block.ln_attn], - 'input': ['self_attention.query_key_value'], - 'inspect': block.self_attention.query_key_value, - 'has_kwargs': False, - } - subset3 = { - 'layers': {'mlp.dense_h_to_4h': block.mlp.dense_h_to_4h}, - 'prev_op': [block.ln_mlp], - 'input': ['mlp.dense_h_to_4h'], - 'inspect': block.mlp.dense_h_to_4h, - 'has_kwargs': False, - } - else: - subset1 = { - 'layers': { - 'self_attention.query_key_value': ( - block.self_attention.query_key_value - ) - }, - 'prev_op': [block.input_layernorm], - 'input': ['self_attention.query_key_value'], - 'inspect': block.self_attention.query_key_value, - 'has_kwargs': False, - } - if block.config.parallel_attn: - subset3 = { - 'layers': {'mlp.dense_h_to_4h': block.mlp.dense_h_to_4h}, - 'prev_op': [block.input_layernorm], - 'input': ['mlp.dense_h_to_4h'], - 'inspect': block.mlp.dense_h_to_4h, - 'has_kwargs': False, - } - else: - subset3 = { - 'layers': {'mlp.dense_h_to_4h': block.mlp.dense_h_to_4h}, - 'prev_op': [block.post_attention_layernorm], - 'input': ['mlp.dense_h_to_4h'], - 'inspect': block.mlp.dense_h_to_4h, - 'has_kwargs': False, - } - - subset2 = { - 'layers': {'self_attention.dense': block.self_attention.dense}, - 'prev_op': [block.self_attention.query_key_value], - 'input': ['self_attention.dense'], - 'inspect': block.self_attention.dense, - 'has_kwargs': False, - } - subset4 = { - 'layers': {'mlp.dense_4h_to_h': block.mlp.dense_4h_to_h}, - 'prev_op': [block.mlp.act], - 'input': ['mlp.dense_4h_to_h'], - 'inspect': block.mlp.dense_4h_to_h, - 'has_kwargs': False, - } - return [subset1, subset2, subset3, subset4] diff --git a/llmc/models/gemma2.py b/llmc/models/gemma2.py deleted file mode 100644 index 47ff5bc7d..000000000 --- a/llmc/models/gemma2.py +++ /dev/null @@ -1,102 +0,0 @@ -from types import MethodType - -import torch.nn as nn -from loguru import logger -from transformers.models.gemma2.modeling_gemma2 import Gemma2RMSNorm - -from llmc.utils.registry_factory import MODEL_REGISTRY - -from .base_model import BaseModel - - -def gemma2_rms_norm_forward(self, x): - output = self._norm(x.float()) - output = output * self.weight.float() - return output.type_as(x) - - -@MODEL_REGISTRY -class Gemma2(BaseModel): - def __init__(self, config, device_map=None, use_cache=False): - super().__init__(config, device_map, use_cache) - for m in self.model.modules(): - if isinstance(m, Gemma2RMSNorm): - w = m.weight.data - del m.weight - m.weight = nn.Parameter(w + 1.0) - m.forward = MethodType(gemma2_rms_norm_forward, m) - - def find_blocks(self): - self.blocks = self.model.model.layers - - def find_embed_layers(self): - self.embed_tokens = self.model.model.embed_tokens - - def find_block_name(self): - self.block_name_prefix = 'model.layers' - self.pairs = {'q_proj': 'qkv', 'o_proj': 'out', 'up_proj': 'fc1'} - - def get_embed_layers(self): - return [self.embed_tokens] - - def 
get_head_layers(self): - return [self.model.lm_head] - - def get_pre_head_layernorm_layers(self): - return [self.model.model.norm] - - def get_layers_except_blocks(self): - return [self.embed_tokens, self.model.model.norm, self.model.lm_head] - - def skip_layer_name(self): - return ['lm_head'] - - def has_bias(self): - return False - - def get_layernorms_in_block(self, block): - return { - 'input_layernorm': block.input_layernorm, - 'pre_feedforward_layernorm': block.pre_feedforward_layernorm, - } - - def get_subsets_in_block(self, block): - return [ - { - 'layers': { - 'self_attn.q_proj': block.self_attn.q_proj, - 'self_attn.k_proj': block.self_attn.k_proj, - 'self_attn.v_proj': block.self_attn.v_proj, - }, - 'prev_op': [block.input_layernorm], - 'input': ['self_attn.q_proj'], - 'inspect': block.self_attn, - 'has_kwargs': True, - }, - { - 'layers': {'self_attn.o_proj': block.self_attn.o_proj}, - 'prev_op': [block.self_attn.v_proj], - 'input': ['self_attn.o_proj'], - 'inspect': block.self_attn.o_proj, - 'has_kwargs': False, - }, - { - 'layers': { - 'mlp.gate_proj': block.mlp.gate_proj, - 'mlp.up_proj': block.mlp.up_proj, - }, - 'prev_op': [block.pre_feedforward_layernorm], - 'input': ['mlp.gate_proj'], - 'inspect': block.mlp, - 'has_kwargs': False, - 'is_mlp': True, - }, - { - 'layers': {'mlp.down_proj': block.mlp.down_proj}, - 'prev_op': [block.mlp.up_proj], - 'input': ['mlp.down_proj'], - 'inspect': block.mlp.down_proj, - 'has_kwargs': False, - 'is_mlp': True, - }, - ] diff --git a/llmc/models/glm4v.py b/llmc/models/glm4v.py deleted file mode 100644 index d4156f9fa..000000000 --- a/llmc/models/glm4v.py +++ /dev/null @@ -1,77 +0,0 @@ -from loguru import logger -from PIL import Image -from transformers import AutoConfig, AutoModelForCausalLM - -from llmc.utils.registry_factory import MODEL_REGISTRY - -from .chatglm import ChatGLM - - -@MODEL_REGISTRY -class GLM4V(ChatGLM): - def __init__(self, config, device_map=None, use_cache=False): - super().__init__(config, device_map, use_cache) - - def build_model(self): - self.vlm_model_config = AutoConfig.from_pretrained( - self.model_path, trust_remote_code=True - ) - if not self.use_cache: - self.vlm_model_config.use_cache = False - logger.info(f'self.vlm_model_config : {self.vlm_model_config}') - self.vlm_model = AutoModelForCausalLM.from_pretrained( - self.model_path, - config=self.vlm_model_config, - torch_dtype=self.torch_dtype, - low_cpu_mem_usage=True, - trust_remote_code=True, - ) - self.mm_model = self.vlm_model - logger.info(f'self.vlm_model : {self.vlm_model}') - self.vision_model = self.vlm_model.transformer.vision - self.vision_projector = self.vlm_model.transformer.vision.linear_proj - self.model = self.vlm_model - self.model_config = self.vlm_model_config - - def get_extra_rot_module_besides_embed_layers(self): - return [self.vision_projector.dense_4h_to_h] - - def batch_process(self, img_qas, calib_or_eval='eval', apply_chat_template=True, return_inputs=True): # noqa - assert calib_or_eval == 'calib' or calib_or_eval == 'eval' - assert apply_chat_template - assert return_inputs, 'return_inputs should be True for GLM4V.' 
- messages = [] - answers = [] - for idx in range(len(img_qas)): - img_path = img_qas[idx]['image'] - if img_path is not None: - image = Image.open(img_path).convert('RGB') - message = [ - { - 'role': 'user', - 'image': image, - 'content': img_qas[idx]['question'], - } - ] - else: - message = [{'role': 'user', 'content': img_qas[idx]['question']}] - messages.append(message) - answers.append(img_qas[idx]['answer']) - inputs = self.tokenizer.apply_chat_template( - messages, - add_generation_prompt=True, - tokenize=True, - return_tensors='pt', - return_dict=True, - padding=True, - ) - if calib_or_eval == 'calib' and self.config['calib'].get('add_answer', False): - raise Exception( - 'glm4v not support add_answer. ' - 'Maybe you can modify tokenization_chatglm.py in model path.' - ) - if calib_or_eval == 'calib': - logger.info(f'Calib data is:\n{inputs}') - - inputs = inputs.to(next(self.vlm_model.parameters()).dtype) - return inputs diff --git a/llmc/models/internlm2.py b/llmc/models/internlm2.py deleted file mode 100644 index 35afe557a..000000000 --- a/llmc/models/internlm2.py +++ /dev/null @@ -1,112 +0,0 @@ -from typing import List, Tuple - -from llmc.compression.quantization.module_utils import _TRANSFORMERS_LN_TYPES_ -from llmc.utils.registry_factory import MODEL_REGISTRY - -from .base_model import BaseModel - - -@MODEL_REGISTRY -class InternLM2(BaseModel): - def __init__(self, config, device_map=None, use_cache=False): - super().__init__(config, device_map, use_cache) - global _TRANSFORMERS_LN_TYPES_ - _TRANSFORMERS_LN_TYPES_ += [type(self.model.model.norm)] - - def find_blocks(self): - self.blocks = self.model.model.layers - - def find_embed_layers(self): - self.tok_embeddings = self.model.model.tok_embeddings - - def find_block_name(self): - self.block_name_prefix = 'model.layers' - - def get_embed_layers(self): - return [self.tok_embeddings] - - def get_head_layers(self): - return [self.model.output] - - def get_pre_head_layernorm_layers(self): - return [self.model.model.norm] - - def get_layers_except_blocks(self): - return [self.tok_embeddings, self.model.model.norm, self.model.output] - - def get_attn_in_block(self, block): - return {'attention': block.attention} - - def skip_layer_name(self): - return ['lm_head'] - - def has_bias(self): - return False - - def get_layernorms_in_block(self, block): - return { - 'attention_norm': block.attention_norm, - 'ffn_norm': block.ffn_norm, - } - - # flake8: noqa - def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, meta_instruction=''): - if history is None: - history = [] - if tokenizer.add_bos_token: - prompt = '' - else: - prompt = tokenizer.bos_token - if meta_instruction: - prompt += f"""<|im_start|>system\n{meta_instruction}<|im_end|>\n""" - for record in history: - prompt += f"""<|im_start|>user\n{record[0]}<|im_end|>\n<|im_start|>assistant\n{record[1]}<|im_end|>\n""" - prompt += f"""<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n""" - return prompt - - # flake8: noqa - def apply_chat_template(self, prompt): - meta_instruction = 'You are an AI assistant whose name is InternLM (书生·浦语).\n' - '- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory ' - '(上海人工智能实验室). It is designed to be helpful, honest, and harmless.\n' - '- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such ' - 'as English and 中文.' 
- text = self.build_inputs(self.tokenizer, prompt, history=[], meta_instruction=meta_instruction) - return text - - def get_subsets_in_block(self, block): - return [ - { - 'layers': {'attention.wqkv': block.attention.wqkv}, - 'prev_op': [block.attention_norm], - 'input': ['attention.wqkv'], - 'inspect': block.attention, - 'has_kwargs': True, - }, - { - 'layers': {'attention.wo': block.attention.wo}, - 'prev_op': [block.attention.wqkv], - 'input': ['attention.wo'], - 'inspect': block.attention.wo, - 'has_kwargs': False, - }, - { - 'layers': { - 'feed_forward.w3': block.feed_forward.w3, - 'feed_forward.w1': block.feed_forward.w1, - }, - 'prev_op': [block.ffn_norm], - 'input': ['feed_forward.w1'], - 'inspect': block.feed_forward, - 'has_kwargs': False, - 'is_mlp': True, - }, - { - 'layers': {'feed_forward.w2': block.feed_forward.w2}, - 'prev_op': [block.feed_forward.w3], - 'input': ['feed_forward.w2'], - 'inspect': block.feed_forward.w2, - 'has_kwargs': False, - 'is_mlp': True, - }, - ] diff --git a/llmc/models/internomni.py b/llmc/models/internomni.py deleted file mode 100644 index e43cc3e31..000000000 --- a/llmc/models/internomni.py +++ /dev/null @@ -1,322 +0,0 @@ -from typing import Optional - -import librosa -import torch -from loguru import logger -from transformers import GenerationConfig - -try: - from internvl.conversation import get_conv_template - from internvl.model.audio.processing_whisper import WhisperProcessor - from internvl.model.internvl_chat import (InternVLChatAudioConfig, - InternVLChatAudioModel) -except Exception: - logger.warning( - 'InternOmni-internvl not installed. ' - 'If you need it, please install it.' - ) - -from llmc.utils.registry_factory import MODEL_REGISTRY - -from .internvl2 import load_image - - -def load_audio(audio_file, audio_processor): - audio_values, _ = librosa.load(audio_file, sr=16000) - - audio_process_values = audio_processor( - audio_values, sampling_rate=16000, return_tensors='pt' - ) - input_features = audio_process_values['input_features'] - audio_len_after_cnn = audio_process_values['audio_len_after_cnn'] - audio_token_num = audio_process_values['audio_token_num'] - - audio_input = { - 'audio_values': input_features, - 'audio_len_after_cnn': audio_len_after_cnn, - 'audio_token_num': audio_token_num, - } - return audio_input - - -@torch.no_grad() -def generate_patch_for_internvl_qwen2( - self, - pixel_values: torch.FloatTensor, - input_ids: torch.FloatTensor, - attention_mask: torch.LongTensor, - visual_features: Optional[torch.FloatTensor] = None, - audio_values: Optional[torch.FloatTensor] = None, - audio_len_after_cnn: Optional[bool] = None, - audio_token_num: Optional[bool] = None, - generation_config: Optional[GenerationConfig] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - **generate_kwargs, -) -> torch.LongTensor: - - assert self.img_context_token_id is not None - assert self.audio_context_token_id is not None - - vit_embeds = None - if visual_features is not None: - vit_embeds = visual_features - elif pixel_values is not None: - vit_embeds = self.extract_feature(pixel_values) - - input_embeds = self.language_model.get_input_embeddings()(input_ids) - B, N, C = input_embeds.shape - input_embeds = input_embeds.reshape(B * N, C) - - input_ids = input_ids.reshape(B * N) - - if vit_embeds is not None: - selected = (input_ids == self.img_context_token_id) - input_embeds[selected] = vit_embeds.reshape(-1, C) - - if audio_values is not None and audio_len_after_cnn is not None and 
audio_token_num is not None: - audio_embeds = self.extract_audio_feature(audio_values, audio_len_after_cnn) - output_audios = [] - for i in range(len(audio_token_num)): - token_num = int(audio_token_num[i].item()) - audio = audio_embeds[i][:token_num] - output_audios.append(audio) - output_audios = torch.cat(output_audios, dim=0) - selected = (input_ids == self.audio_context_token_id) - input_embeds[selected] = output_audios.reshape(-1, C) - - input_embeds = input_embeds.reshape(B, N, C) - - outputs = self.language_model.generate( - inputs_embeds=input_embeds, - attention_mask=attention_mask, - generation_config=generation_config, - output_hidden_states=output_hidden_states, - use_cache=True, - **generate_kwargs, - ) - - return outputs - - -@MODEL_REGISTRY -class InternOmni(): - def __new__(cls, config, device_map=None, use_cache=False): - avlm_model_config = InternVLChatAudioConfig.from_pretrained( - config.model.path, trust_remote_code=True - ) - language_part = avlm_model_config.llm_config.model_type - logger.warning(f'InternOmni language_part: {language_part}') - if language_part == 'internlm2': - from .internlm2 import InternLM2 - - class NewClass(InternOmniSharedBehavior, InternLM2): - def __init__(self, config, device_map=None, use_cache=False): - super().__init__(config, device_map, use_cache) - elif language_part == 'qwen2': - from .qwen2 import Qwen2 - - class NewClass(InternOmniSharedBehavior, Qwen2): - def __init__(self, config, device_map=None, use_cache=False): - super().__init__(config, device_map, use_cache) - setattr( - self.avlm_model, - 'generate', - generate_patch_for_internvl_qwen2.__get__(self.avlm_model), - ) - elif language_part == 'phi3': - from .phi3 import Phi3 - - class NewClass(InternOmniSharedBehavior, Phi3): - def __init__(self, config, device_map=None, use_cache=False): - super().__init__(config, device_map, use_cache) - elif language_part == 'llama': - from .llama import Llama - - class NewClass(InternOmniSharedBehavior, Llama): - def __init__(self, config, device_map=None, use_cache=False): - super().__init__(config, device_map, use_cache) - else: - raise Exception(f'Not support for language_part: {language_part}') - return NewClass(config, device_map, use_cache) - - -@MODEL_REGISTRY -class InternOmniSharedBehavior(): - def __init__(self, config, device_map=None, use_cache=False): - super().__init__(config, device_map, use_cache) - - def build_model(self): - self.avlm_model_config = InternVLChatAudioConfig.from_pretrained( - self.model_path, trust_remote_code=True - ) - logger.info(f'self.avlm_model_config : {self.avlm_model_config}') - self.avlm_model = InternVLChatAudioModel.from_pretrained( - self.model_path, - config=self.avlm_model_config, - trust_remote_code=True, - torch_dtype=self.torch_dtype, - low_cpu_mem_usage=True, - ) - self.mm_model = self.avlm_model - logger.info(f'self.avlm_model : {self.avlm_model}') - self.model = self.avlm_model.language_model - self.model_config = self.avlm_model_config.llm_config - if not self.use_cache: - if hasattr(self.model_config, 'use_cache'): - self.model_config.use_cache = False - - self.audio_model = self.avlm_model.audio_model - self.vision_model = self.avlm_model.vision_model - self.vision_projector = self.avlm_model.mlp1 - self.audio_projector = self.avlm_model.mlp2 - - IMG_CONTEXT_TOKEN = '' - AUDIO_CONTEXT_TOKEN = '' - img_context_token_id = self.tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN) - audio_context_token_id = self.tokenizer.convert_tokens_to_ids(AUDIO_CONTEXT_TOKEN) - - 
self.avlm_model.img_context_token_id = img_context_token_id - self.avlm_model.audio_context_token_id = audio_context_token_id - self.avlm_model.ps_version = 'v2' - - self.audio_processor = WhisperProcessor.from_pretrained(self.model_path) - - self.default_image_prompt_template = { - 'single': '\n', - 'multiple': 'Image-<|idx|>: \n' - } - self.default_audio_prompt_template = { - 'single': '