diff --git a/.flake8 b/.flake8 new file mode 100644 index 00000000..a2c2e033 --- /dev/null +++ b/.flake8 @@ -0,0 +1,4 @@ +[flake8] +exclude = .git,.venv,__pycache__,docs/source/conf.py,old,build,dist +max-line-length = 120 +extend-ignore = E722, E203 diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100644 index 00000000..3cdd7ed3 --- /dev/null +++ b/.github/CONTRIBUTING.md @@ -0,0 +1,39 @@ +# Welcome to 42 norminette contributing guide + +We appreciate your interest in contributing to our project.
+Please take a moment to read through this guide to understand how you can contribute effectively. + +## Issue Tracker + +Before starting your contribution, we recommend checking our issue tracker [here](https://github.com/42School/norminette/issues).
+It lists the current tasks, bug reports, and feature requests. +If you find an open issue that you'd like to work on, please comment on it to let us know. + +If you encounter a new issue or have a feature request that is not already listed, please open a new issue following the given template. + +## Getting Started + +To contribute to this project, follow these steps: + +1. Fork the repository to your GitHub account. +2. Clone the forked repository to your local machine. +3. Create a new branch for your contribution. Choose a descriptive name related to the task you'll be working on. +4. Make your changes, write your code, and commit them with clear and concise commit messages. +5. Push your branch to your forked repository on GitHub. +6. Open a pull request (PR) from your branch to the main repository's branch. If you work on an open issue, please mention it. +7. Provide a clear title and description for your PR, explaining the changes you made. +8. Our team will review your code, provide feedback, and discuss any necessary changes. +9. Make the requested changes, if applicable, and push the updates to your branch. +10. Once your PR is approved, it will be merged into the main repository. + +## Code Guidelines + +- Use the flake8 linter to check your code for errors. +- Try to keep your code easy to understand. +- Write unit tests for new code or modify existing tests to maintain test coverage. +- Run the existing tests and ensure they pass before submitting your contribution (see the example commands below). +- Code that does not pass flake8 or the tests will not be reviewed. + +Thank you for your interest in contributing to our project.
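+
+You can run the required checks locally before opening your PR. The commands below are a minimal sketch, assuming you have cloned the repository and have poetry available on your machine:
+
+```shell
+# Install the project and its development dependencies
+poetry install
+
+# Lint the code base
+poetry run flake8
+
+# Run the unit tests
+poetry run pytest
+
+# Optionally, validate compatibility across Python versions
+poetry run tox
+```
+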
+Your contributions are valuable and greatly appreciated.
+Happy coding! diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 6679211b..4b8864d5 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -15,9 +15,7 @@ Please use markdown to send the code here. **Additional infos** - - OS: - - python --version: - - norminette -v: +Copy and paste the output of `norminette --version` command here. **Additional context** Add any other context about the problem here. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..fd3d071d --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,16 @@ +## Issues Fixed + +- Closes #[issue number] +- Related to #[issue number] (if applicable) + +## Verification Steps +Please ensure the following steps have been completed: +- [ ] Added new tests to cover the changes. +- [ ] Fixed all broken tests. +- [ ] Ran `poetry run flake8` to check for linting issues. +- [ ] Verified that all unit tests are passing: + - [ ] Ran `poetry run pytest` to ensure unit tests pass. + - [ ] Ran `poetry run tox` to validate compatibility across Python versions. + +## Additional Notes + diff --git a/.github/workflows/check-i18n.yml b/.github/workflows/check-i18n.yml new file mode 100644 index 00000000..4616c06a --- /dev/null +++ b/.github/workflows/check-i18n.yml @@ -0,0 +1,45 @@ +name: Check I18N Changes + +on: + pull_request: + branches: + - master + +jobs: + check-i18n: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository (PR branch) + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + fetch-depth: 0 + + - name: Install system dependencies + run: sudo apt-get update && sudo apt-get install -y gettext + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install poetry + poetry install + + - name: Run I18N module + run: | + poetry run python -m norminette.i18n + + - name: Check for uncommitted .po changes + run: | + if git diff --ignore-matching-lines='^"POT-Creation-Date:' -- '*.po' | grep -P '^[+-](?![+-]{2} [ab]/)' > /dev/null; then + echo "Meaningful I18N changes detected. Please run 'python -m norminette/i18n.py' and commit the changes." + git diff -- '*.po' + exit 1 + else + echo "No changes detected in I18N files." 
+ fi diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index a6d01254..00c790bb 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -5,29 +5,41 @@ name: Python package on: push: - branches: [ master ] + branches: [master] pull_request: - branches: [ master ] + branches: [master] jobs: build: - runs-on: ubuntu-latest strategy: + fail-fast: false matrix: - python-version: [3.7, 3.8, 3.9] + python-version: ["3.10", "3.11", "3.12", "3.13"] + timeout-minutes: 5 steps: - - uses: actions/checkout@v2 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: | - python -m pip install --upgrade pip - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - python setup.py develop --user - - name: Tester - run: | - cd norminette && sh run_test.sh + - uses: actions/checkout@v2 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install poetry + poetry install + + - name: Build package + run: | + poetry build + + - name: Run linter + run: | + poetry run flake8 + + - name: Run tests + run: | + poetry run pytest diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml index 172678ff..1116d7cf 100644 --- a/.github/workflows/python-publish.yml +++ b/.github/workflows/python-publish.yml @@ -17,22 +17,21 @@ jobs: - name: Set up Python uses: actions/setup-python@v2 with: - python-version: '3.9' + python-version: '3.10' - name: Install dependencies run: | + sudo apt-get update && sudo apt-get install -y gettext python -m pip install --upgrade pip - pip install setuptools wheel twine + pip install setuptools wheel twine poetry + - name: Install norminette package + run: | + poetry install + - name: Compile .mo files + run: | + poetry run python norminette/i18n.py - name: Build and publish env: TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} run: | - python setup.py sdist - twine upload dist/* - - name: Build and publish to test_pypi - env: - TWINE_USERNAME: ${{ secrets.PYPI_TEST_USERNAME }} - TWINE_PASSWORD: ${{ secrets.PYPI_TEST_PASSWORD }} - run: | - python setup.py sdist - twine upload --repository testpypi dist/* \ No newline at end of file + poetry publish --build --username $TWINE_USERNAME --password $TWINE_PASSWORD diff --git a/.gitignore b/.gitignore index 66929449..88543120 100644 --- a/.gitignore +++ b/.gitignore @@ -12,4 +12,15 @@ build/* *.fdb_latexmk *.fls *.log -.eggs \ No newline at end of file +.eggs +bundle/ +pdf/*.out +pdf/*.toc +resources/ +dist/ + +*_cache +.venv +.tox +*.pot +**/*.mo diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index e69de29b..00000000 diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 62a440b3..00000000 --- a/.travis.yml +++ /dev/null @@ -1,29 +0,0 @@ -os: - - linux - -dist: bionic - -language: python -python: - - 3.7 - - -before_script: - - python -V - -jobs: - include: - - stage: Linting - script: - - pip install -r requirements.txt - - pycodestyle $(find norminette/ -d -name "*.py") - - stage: Unit tests - script: - - cd norminette && python -m unittest discover tests/lexer/unit-tests "*.py" - - stage: Lexer tests - script: - - python -m tests.lexer.files.file_token_test - - 
python -m tests.lexer.errors.tester - - stage: Rule tests - script: - - python -m tests.rules.rule_tester diff --git a/Dockerfile b/Dockerfile index cd525aaa..d1a5201e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,12 +1,14 @@ -FROM python:3.7 +FROM python:3.13-alpine WORKDIR /usr/src/norminette -COPY . . +COPY pyproject.toml poetry.lock README.md ./ +COPY norminette/ ./norminette/ -RUN pip3 install -r requirements.txt \ - && python3 setup.py install +RUN pip3 install --no-cache-dir 'poetry>=2,<3' --root-user-action=ignore \ + && poetry build \ + && pip3 install dist/*.whl --root-user-action=ignore WORKDIR /code -ENTRYPOINT ["norminette"] \ No newline at end of file +ENTRYPOINT ["norminette"] diff --git a/README.md b/README.md index 0e53af6e..be409e35 100644 --- a/README.md +++ b/README.md @@ -1,53 +1,124 @@ # norminette for 42 schools -## Install: +## Install -requires python3.7+ (3.7, 3.8, 3.9) +Norminette requires Python >=3.10. + +### Directly inside your global commands Install using pip. ```shell -pip install norminette +python3 -m pip install -U norminette +``` + +Install using pipx. +```shell +sudo apt update +sudo apt install python3-setuptools +sudo apt install pipx +pipx install norminette +pipx ensurepath ``` + +Install using a virtual environment. +```shell +python3 -m venv $HOME/.venv +source $HOME/.venv/bin/activate +python3 -m pip install --upgrade pip setuptools +python3 -m pip install norminette +echo "export PATH=\$PATH:$HOME/.venv/bin" >> $HOME/.${SHELL##/bin/}rc +deactivate +``` + To upgrade an existing install, use ```shell -pip install --upgrade norminette +python3 -m pip install --upgrade norminette ``` ## Usage +- Runs on the current folder and any subfolder: + ``` norminette ``` -Runs on the current folder and any subfolder + +- Runs on the given filename(s): ``` norminette filename.[c/h] ``` -Runs on the given filename(s) + +- Prevents stopping on various blocking errors: ``` norminette -d ``` -Prevents stopping on various blocking errors + +- Outputs all the debug logging: ``` norminette -dd ``` -Outputs all the debug logging ## Docker usage ``` docker build -t norminette . cd ~/42/ft_printf -docker run -v $PWD:/code norminette /code +docker run --rm -v $PWD:/code norminette ``` If you encounter an error or an incorrect output, you can: - - Open an issue on github + - Open an issue on github - Post a message on the dedicated slack channel (#norminette-v3-beta) - + Please try to include as much information as possible (the file on which it crashed, etc) Feel free to do pull requests if you want to help as well. Make sure that run_test.sh properly runs after your modifications. + +## Run for development + +This new version uses poetry as a dependency manager. + +If you want to contribute: + +```shell +poetry install + +# Run dev norminette +poetry run norminette + +# Or... 
with virtual env +source .venv/bin/activate +norminette + +# Run tests +poetry run pytest +``` + +## Github action + +Workflow example to check code with github action : + +```yaml +--- +name: Norminette + +on: + push: + +jobs: + check-norminette: + name: Norminette + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Norminette + uses: 42School/norminette@ + with: + args: '-RCheckForbiddenSourceHeader' +``` diff --git a/action.yml b/action.yml new file mode 100644 index 00000000..aeabefa2 --- /dev/null +++ b/action.yml @@ -0,0 +1,19 @@ +--- +name: 'norminette-action' +author: '42 School' +description: 'It is the official github action for 42 school norminette' +branding: + icon: 'check' + color: 'gray-dark' +inputs: + args: + description: 'Args passed to norminette' + required: false + default: '.' +runs: + using: 'docker' + image: 'Dockerfile' + entrypoint: 'sh' + args: + - '-c' + - "norminette ${{ inputs.args }}" diff --git a/deploy.sh b/deploy.sh deleted file mode 100644 index 2fa7476a..00000000 --- a/deploy.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -SUFFIX="" - -if [ "$#" -gt "0" ] -then - SUFFIX="-dev" -fi - -BUILD_DIR="build" -PKG_ROOT="$BUILD_DIR/pkgroot" -PACKAGE_NAME="norminette$SUFFIX" -DESCRIPTION="Norminette" -VERSION=`cat norminette/version.py | cut -d'"' -f2` -OUTFILE="norminette_$VERSION$SUFFIX.pkg" -SUBDIRECTORY="apps/norminette" - -rm $OUTFILE - -rm -rf $BUILD_DIR -mkdir $BUILD_DIR -python3 -m venv $BUILD_DIR/venv -source $BUILD_DIR/venv/bin/activate -python3 setup.py install -sed -i '' 's#/Users/.*/venv#/usr/share/norminette/venv#' $BUILD_DIR/venv/bin/* -deactivate - -mkdir -p $PKG_ROOT/usr/share/norminette -mv $BUILD_DIR/venv $PKG_ROOT/usr/share/norminette/venv - -pkgbuild --identifier $PACKAGE_NAME --version $VERSION --root $PKG_ROOT --install-location / $OUTFILE -rm -rf $BUILD_DIR -rm -rf dist - - diff --git a/norminette/__init__.py b/norminette/__init__.py index e69de29b..503d22be 100644 --- a/norminette/__init__.py +++ b/norminette/__init__.py @@ -0,0 +1,4 @@ +__version__ = "3.3.59" +__name__ = "norminette" +__author__ = "42" +__author__email__ = "pedago@42.fr" diff --git a/norminette/__main__.py b/norminette/__main__.py index f45e80a9..3272f36f 100644 --- a/norminette/__main__.py +++ b/norminette/__main__.py @@ -1,106 +1,150 @@ -import sys -import glob -import os -file_dir = os.path.dirname(__file__) -sys.path.append(file_dir) import argparse -import pkg_resources -from lexer import Lexer, TokenError -from exceptions import CParsingError -from registry import Registry -from context import Context -from tools.colors import colors -import _thread -from threading import Thread, Event -from multiprocessing import Process, Queue -import time -#import sentry_sdk -#from sentry_sdk import configure_scope -from version import __version__ +import glob +import pathlib +import platform +import subprocess +import sys +from importlib.metadata import version + +from norminette.context import Context +from norminette.errors import formatters +from norminette.exceptions import CParsingError +from norminette.file import File +from norminette.lexer import Lexer +from norminette.registry import Registry +from norminette.tools.colors import colors -has_err = False +version_text = f"norminette {version('norminette')}" +version_text += f", Python {platform.python_version()}" +version_text += f", {platform.platform()}" -def timeout(e, timeval=5): - time.sleep(timeval) - if e.is_set(): - return - 
#sentry_sdk.capture_exception(Exception(TimeoutError)) - _thread.interrupt_main() def main(): parser = argparse.ArgumentParser() - parser.add_argument("file", help="File(s) or folder(s) you wanna run the parser on. If no file provided, runs on current folder.", default=[], action='append', nargs='*') - parser.add_argument("-d", "--debug", action="count", help="Debug output (multiple values available)", default=0) - parser.add_argument('-v', '--version', action='version', version='norminette ' + str(__version__)) - #parser.add_argument('-s', '--sentry', action='store_true', default=False) - parser.add_argument('--cfile', action='store', help="Store C file content directly instead of filename") - parser.add_argument('--hfile', action='store', help="Store header file content directly instead of filename") + parser.add_argument( + "file", + help="File(s) or folder(s) you wanna run the parser on. If no file provided, runs on current folder.", + nargs="*", + ) + parser.add_argument( + "-d", + "--debug", + action="count", + help="Debug output (-dd outputs the whole tokenization and such, used for developping)", + default=0, + ) + parser.add_argument( + "-o", + "--only-filename", + action="store_true", + help="By default norminette displays the full path to the file, this allows to show only filename", + default=False, + ) + parser.add_argument( + "-v", + "--version", + action="version", + version=version_text, + ) + parser.add_argument( + "--cfile", + action="store", + help="Store C file content directly instead of filename", + ) + parser.add_argument( + "--hfile", + action="store", + help="Store header file content directly instead of filename", + ) + parser.add_argument( + "--filename", + action="store", + help="Stores filename if --cfile or --hfile is passed", + ) + parser.add_argument( + "--use-gitignore", + action="store_true", + help="Parse only source files not match to .gitignore", + ) + parser.add_argument( + "-f", + "--format", + choices=list(formatter.name for formatter in formatters), + help="formatting style for errors", + default="humanized", + ) + parser.add_argument( + "--no-colors", action="store_true", help="Disable colors in output" + ) + parser.add_argument("-R", nargs=1, help="compatibility for norminette 2") args = parser.parse_args() registry = Registry() - targets = [] - has_err = None - content = None + format = next(filter(lambda it: it.name == args.format, formatters)) + files = [] debug = args.debug - #if args.sentry == True: - #sentry_sdk.init("https://e67d9ba802fe430bab932d7b11c9b028@sentry.42.fr/72") - if args.cfile != None or args.hfile != None: - targets = ['file.c'] if args.cfile else ['file.h'] - content = args.cfile if args.cfile else args.hfile + if args.cfile or args.hfile: + file_name = args.filename or ("file.c" if args.cfile else "file.h") + file_data = args.cfile if args.cfile else args.hfile + file = File(file_name, file_data) + files.append(file) else: - args.file = args.file[0] - if args.file == [[]] or args.file == []: - targets = glob.glob("**/*.[ch]", recursive=True) - target = targets.sort() - else: - for arg in args.file: - if os.path.exists(arg) is False: - print(f"'{arg}' no such file or directory") - elif os.path.isdir(arg): - if arg[-1] != '/': - arg = arg + '/' - targets.extend(glob.glob(arg + '**/*.[ch]', recursive=True)) - elif os.path.isfile(arg): - targets.append(arg) - event = [] - for target in targets: - if target[-2:] not in [".c", ".h"]: - print(f"{target} is not valid C or C header file") - else: - #with configure_scope() as scope: 
- # scope.set_extra("File", target) - try: - event.append(Event()) - #if args.sentry == True: - # proc = Thread(target=timeout, args=(event[-1], 5, )) - # proc.daemon = True - # proc.start() - if content == None: - with open(target) as f: - #print ("Running on", target) - source = f.read() - else: - source = content - lexer = Lexer(source) - tokens = lexer.get_tokens() - context = Context(target, tokens, debug) - registry.run(context, source) - event[-1].set() - if context.errors: - has_err = True - # except (TokenError, CParsingError) as e: - except TokenError as e: - has_err = True - print(target + f": KO!\n\t{colors(e.msg, 'red')}") - event[-1].set() - except CParsingError as e: - has_err = True - print(target + f": KO!\n\t{colors(e.msg, 'red')}") - event[-1].set() - except KeyboardInterrupt as e: - event[-1].set() + stack = [] + stack += args.file if args.file else glob.glob("**/*.[ch]", recursive=True) + for item in stack: + path = pathlib.Path(item) + if not path.exists(): + print(f"Error: '{path!s}' no such file or directory") sys.exit(1) - sys.exit(1 if has_err else 0) + if path.is_file(): + if path.suffix not in (".c", ".h"): + print(f"Error: {path.name!r} is not valid C or C header file") + else: + file = File(item) + files.append(file) + if path.is_dir(): + stack += glob.glob(str(path) + "/**/*.[ch]", recursive=True) + del stack + + if args.use_gitignore: + tmp_targets = [] + for target in files: + command = ["git", "check-ignore", "-q", target.path] + exit_code = subprocess.run( + command, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ).returncode + """ + see: $ man git-check-ignore + EXIT STATUS + 0: One or more of the provided paths is ignored. + 1: None of the provided paths are ignored. + 128: A fatal error was encountered. + """ + if exit_code == 0: + pass + elif exit_code == 1: + tmp_targets.append(target) + elif exit_code == 128: + print( + f"Error: something wrong with --use-gitignore option {target.path!r}" + ) + sys.exit(0) + files = tmp_targets + for file in files: + try: + lexer = Lexer(file) + tokens = list(lexer) + context = Context(file, tokens, debug, args.R) + registry.run(context) + except CParsingError as e: + print(file.path + f": Error!\n\t{colors(e.msg, 'red')}") + sys.exit(1) + except KeyboardInterrupt: + sys.exit(1) + errors = format(files, use_colors=not args.no_colors) + print(errors, end="") + sys.exit(1 if any(len(it.errors) for it in files) else 0) + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/norminette/colors.py b/norminette/colors.py new file mode 100644 index 00000000..2bb8dc28 --- /dev/null +++ b/norminette/colors.py @@ -0,0 +1,62 @@ +from typing import Optional + +red = { + "TOO_MANY_ARGS", + "TOO_MANY_VARS_FUNC", +} +yellow = { + "MIXED_SPACE_TAB", + "BRACE_NEWLINE" +} +green = { + "TOO_MANY_FUNCS", +} +blue = { + "SPC_INSTEAD_TAB", + "TAB_INSTEAD_SPC", + "CONSECUTIVE_SPC", + "CONSECUTIVE_WS", + "SPC_BFR_OPERATOR", + "SPC_AFTER_OPERATOR", + "NO_SPC_BFR_OPR", + "NO_SPC_AFR_OPR", + "SPC_AFTER_PAR", + "SPC_BFR_PAR", + "NO_SPC_AFR_PAR", + "NO_SPC_BFR_PAR", + "SPC_AFTER_POINTER", + "SPC_LINE_START", + "SPC_BFR_POINTER", + "SPACE_BEFORE_FUNC", + "TOO_MANY_TABS_FUNC", + "TOO_MANY_TABS_TD", + "MISSING_TAB_FUNC", + "MISSING_TAB_VAR", + "TOO_MANY_TAB_VAR", + "LINE_TOO_LONG", + "EXP_PARENTHESIS", +} +pink = { + "WRONG_SCOPE_COMMENT", + "COMMENT_ON_INSTR", +} +grey = { + "INVALID_HEADER", + "WRONG_SCOPE_COMMENT", +} + +_color_table = { + "91": red, + "92": green, + "93": yellow, + "94": blue, + "95": pink, + 
"97": grey, +} + + +def error_color(name: str) -> Optional[str]: + for color, table in _color_table.items(): + if name in table: + return color + return None diff --git a/norminette/context.py b/norminette/context.py index c92404dc..0cc52c89 100644 --- a/norminette/context.py +++ b/norminette/context.py @@ -1,8 +1,10 @@ -from norm_error import NormError -from lexer.dictionary import operators, brackets -from tools.colors import colors -from scope import * -from exceptions import CParsingError +from dataclasses import dataclass, field + +from norminette.errors import Error, Highlight +from norminette.lexer import Token +from norminette.exceptions import CParsingError +from norminette.scope import GlobalScope, ControlStructure +from norminette.tools.colors import colors types = [ "CHAR", @@ -15,14 +17,10 @@ "LONG", "SHORT", "SIGNED", - "UNSIGNED" + "UNSIGNED", ] -utypes = [ - "STRUCT", - "ENUM", - "UNION" -] +utypes = ["STRUCT", "ENUM", "UNION"] glued_operators = [ "MINUS", @@ -105,54 +103,99 @@ "GOTO", "LABEL", "SWITCH", - "CASE" + "CASE", ] misc_specifiers = [ "CONST", + "RESTRICT", "REGISTER", "STATIC", "VOLATILE", "EXTERN", "INLINE", - "RESTRICT" + "RESTRICT", "SIGNED", "UNSIGNED", ] -size_specifiers = [ - "LONG", - "SHORT" +assigns = [ + "RIGHT_ASSIGN", + "LEFT_ASSIGN", + "ADD_ASSIGN", + "SUB_ASSIGN", + "MUL_ASSIGN", + "DIV_ASSIGN", + "MOD_ASSIGN", + "AND_ASSIGN", + "XOR_ASSIGN", + "OR_ASSIGN", + "ASSIGN", ] -sign_specifiers = [ - "SIGNED", - "UNSIGNED" -] +size_specifiers = ["LONG", "SHORT"] -whitespaces = [ - "SPACE", - "TAB", - "ESCAPED_NEWLINE", - "NEWLINE" -] +sign_specifiers = ["SIGNED", "UNSIGNED"] -arg_separator = [ - "COMMA", - "CLOSING_PARENTHESIS" -] +whitespaces = ["SPACE", "TAB", "ESCAPED_NEWLINE", "NEWLINE"] + +arg_separator = ["COMMA", "CLOSING_PARENTHESIS"] + + +@dataclass +class Macro: + name: str + is_func: bool = field(default=False) + + @classmethod + def from_token(self, token, **kwargs): + name = token.value or token.type + return Macro(name, **kwargs) + + +class PreProcessors: + def __init__(self) -> None: + self.indent = 0 + + self.macros = [] + self.includes = [] + + self.total_ifs = 0 + self.total_elifs = 0 + self.total_elses = 0 + self.total_ifdefs = 0 + self.total_ifndefs = 0 + + self.skip_define = False + + @property + def indent(self): + return self._indent + + @indent.setter + def indent(self, value): + self._indent = max(0, value) + + def has_macro_defined(self, name): + for macro in self.macros: + if macro.name == name: + return True + return False class Context: - def __init__(self, filename, tokens, debug=0): + def __init__(self, file, tokens, debug=0, added_value=[]): + # Header relative informations + self.header_started = False + self.header_parsed = False + self.header = "" # File relative informations - self.filename = filename.split('/')[-1] - self.filetype = filename.split('.')[-1] # ? 
+ self.file = file self.tokens = tokens self.debug = int(debug) # Rule relative informations self.history = [] - self.errors = [] + self.errors = file.errors self.tkn_scope = len(tokens) # Scope informations @@ -163,7 +206,9 @@ def __init__(self, filename, tokens, debug=0): self.arg_pos = [0, 0] # Preprocessor handling - self.preproc_scope_indent = 0 + self.protected = False + self.preproc = PreProcessors() + self.preproc.skip_define = "CheckDefine" in (added_value or []) def peek_token(self, pos): return self.tokens[pos] if pos < len(self.tokens) else None @@ -172,22 +217,40 @@ def pop_tokens(self, stop): self.tokens = self.tokens[stop:] def check_token(self, pos, value): - """Compares the token at 'pos' against a value or list of values - """ + """Compares the token at 'pos' against a value or list of values""" tkn = self.peek_token(pos) if tkn is None: return None - if isinstance(value, list): - if tkn.type in value: - return True - return False + if isinstance(value, (tuple, list)): + return tkn.type in value return tkn.type == value - def new_error(self, errno, tkn): - self.errors.append(NormError(errno, tkn.pos[0], tkn.pos[1])) + def find_in_scope(self, value, nested=True): + nests = 0 + for i in range(0, self.tkn_scope): + tkn = self.peek_token(i) + if self.check_token(i, ["LBRACKET", "LPARENTHESIS", "LBRACE"]) is True: + nests += 1 + if self.check_token(i, ["RBRACKET", "RPARENTHESIS", "RBRACE"]) is True: + nests -= 1 + if tkn.type == value and ( + nested is True or (nests == 0 and nested is False) + ): + return i + return -1 + + def new_error(self, errno, tkn: Token): + # XXX Deprecated, use `.errors` and `norminette.errors` module. + error = Error.from_name(errno, highlights=[Highlight.from_token(tkn)]) + self.errors.add(error) + + def new_warning(self, errno, tkn: Token): + # XXX Deprecated, use `.errors` and `norminette.errors` module. + error = Error.from_name(errno, level="Notice", highlights=[Highlight.from_token(tkn)]) + self.errors.append(error) def get_parent_rule(self): if len(self.history) == 0: @@ -196,15 +259,23 @@ def get_parent_rule(self): def update(self): """Updates informations about the context and the scope if needed - after a primary rule has succeeded. - Do nothing on empty lines since they can be anywhere + after a primary rule has succeeded. 
+ Do nothing on empty lines since they can be anywhere """ - if len(self.history) > 0 and (self.history[-1] == "IsEmptyLine" or self.history[-1] == "IsComment" or self.history[-1] == "IsPreprocessorStatement"): + if len(self.history) > 0 and ( + self.history[-1] == "IsEmptyLine" + or self.history[-1] == "IsComment" + or self.history[-1] == "IsPreprocessorStatement" + ): return if self.sub is not None: self.scope = self.sub self.sub = None - if type(self.scope) is ControlStructure and self.scope.multiline is False and self.scope.instructions > 0: + if ( + type(self.scope) is ControlStructure + and self.scope.multiline is False + and self.scope.instructions > 0 + ): self.scope = self.scope.outer() self.sub = None self.update() @@ -212,13 +283,15 @@ def update(self): def dprint(self, rule, pos): """Debug printing, shows the primary rules that succeed in matching - tokens and print the matching tokens + tokens and print the matching tokens """ if self.debug < 2: return - print(f"{colors(self.filename, 'cyan')} - {colors(rule, 'green')} \ + print( + f"{colors(self.file.basename, 'cyan')} - {colors(rule, 'green')} \ In \"{self.scope.name}\" from \ -\"{self.scope.parent.name if self.scope.parent is not None else None}\" line {self.tokens[0].pos[0]}\":") +\"{self.scope.parent.name if self.scope.parent is not None else None}\" line {self.tokens[0].pos[0]}\":" + ) i = 0 for t in self.tokens[:pos]: if i == 0: @@ -231,10 +304,12 @@ def dprint(self, rule, pos): i += 1 if pos - 1 < len(self.tokens) and self.tokens[pos - 1].type != "NEWLINE": print("") + elif len(self.tokens) == 1 and self.tokens[-1].type != "NEWLINE": + print("") def eol(self, pos): """Skips white space characters (tab, space) until end of line - (included) or any other token (excluded) + (included) or any other token (excluded) """ while self.check_token(pos, ["TAB", "SPACE", "NEWLINE"]) is True: if self.check_token(pos, "NEWLINE"): @@ -243,26 +318,27 @@ def eol(self, pos): pos += 1 return pos - def skip_ws(self, pos, nl=False): + def skip_ws(self, pos, nl=False, comment=False): ws = whitespaces[:] - if nl == False: + if nl is False: ws.remove("NEWLINE") + if comment: + ws += ("COMMENT", "MULT_COMMENT") while self.check_token(pos, ws): pos += 1 return pos - def skip_nest_reverse(self, pos): """Skips anything between two brackets, parentheses or braces starting - at 'pos', if the brackets, parentheses or braces are not closed or - are closed in the wrong order an error shall be raised + at 'pos', if the brackets, parentheses or braces are not closed or + are closed in the wrong order an error shall be raised """ rbrackets = ["LBRACKET", "LBRACE", "LPARENTHESIS"] lbrackets = ["RBRACKET", "RBRACE", "RPARENTHESIS"] try: c = self.peek_token(pos).type except: - raise CParsingError(f"Unexpected EOF line {pos}") + raise CParsingError(f"Error: Unexpected EOF line {pos}") if c not in lbrackets: return pos c = rbrackets[lbrackets.index(c)] @@ -276,22 +352,24 @@ def skip_nest_reverse(self, pos): if c == self.peek_token(i).type: return i i -= 1 - raise CParsingError("Nested parentheses, braces or brackets\ - are not correctly closed") + raise CParsingError( + "Error: Nested parentheses, braces or brackets\ + are not correctly closed" + ) return -1 def skip_nest(self, pos): """Skips anything between two brackets, parentheses or braces starting - at 'pos', if the brackets, parentheses or braces are not closed or - are closed in the wrong order an error shall be raised + at 'pos', if the brackets, parentheses or braces are not closed or + are closed 
in the wrong order an error shall be raised """ lbrackets = ["LBRACKET", "LBRACE", "LPARENTHESIS"] rbrackets = ["RBRACKET", "RBRACE", "RPARENTHESIS"] - try: - c = self.peek_token(pos).type - except: - raise CParsingError(f"Unexpected EOF line {pos}") + # try: + c = self.peek_token(pos).type + # except: + # raise CParsingError(f"Error: Code ended unexpectedly.") if c not in lbrackets: return pos c = rbrackets[lbrackets.index(c)] @@ -305,16 +383,35 @@ def skip_nest(self, pos): if c == self.peek_token(i).type: return i i += 1 - raise CParsingError("Nested parentheses, braces or brackets\ - are not correctly closed") + raise CParsingError( + "Error: Nested parentheses, braces or brackets\ + are not correctly closed" + ) return -1 def skip_misc_specifier(self, pos, nl=False): i = self.skip_ws(pos, nl=nl) + if self.check_token(i, "IDENTIFIER"): + tmp = self.skip_misc_specifier(i + 1) + if tmp != i + 1: + tmp = i + if self.check_token(i, "MULT"): + tmp = i + 1 + while self.check_token(tmp, "MULT"): + tmp += 1 + tmp = self.skip_ws(tmp, nl=nl) + if self.check_token(tmp, misc_specifiers): + i = tmp + i = self.skip_ws(i, nl=nl) while self.check_token(i, misc_specifiers): i += 1 i = self.skip_ws(i, nl=nl) + if self.check_token(i, "MULT"): + tmp = i + 1 + tmp = self.skip_ws(i, nl=nl) + if self.check_token(tmp, misc_specifiers): + i = tmp return i def skip_typedef(self, pos): @@ -326,12 +423,12 @@ def skip_typedef(self, pos): def check_type_specifier(self, pos, user_def_type=False, nl=False): """Returns (True, pos + n) if the tokens from 'pos' to 'n' could match - a valid type specifier. Valid type specifiers consist of: - -an optionnal 'misc' specifier (const, register, volatile ...) - -an optionnal size specifier (long or short) - -a type specifier (int, char, double, etc...) - OR an IDENTIFIER - OR a user type specifier (struct, union, enum) + IDENTIFIER + a valid type specifier. Valid type specifiers consist of: + -an optionnal 'misc' specifier (const, register, volatile ...) + -an optionnal size specifier (long or short) + -a type specifier (int, char, double, etc...) + OR an IDENTIFIER + OR a user type specifier (struct, union, enum) + IDENTIFIER """ i = self.skip_misc_specifier(pos, nl=nl) i = self.skip_ws(i, nl=nl) @@ -342,7 +439,7 @@ def check_type_specifier(self, pos, user_def_type=False, nl=False): if self.check_token(i, "IDENTIFIER") is True: i += 1 return True, i - #Raise CParsingError? + # Raise CParsingError? 
if self.check_token(i, types + ["IDENTIFIER", "TYPEDEF"]) is False: return False, 0 if self.check_token(i, "IDENTIFIER") is True: @@ -362,16 +459,22 @@ def check_type_specifier(self, pos, user_def_type=False, nl=False): return False, 0 if self.check_token(i, "IDENTIFIER") is True: i += 1 + # i = self.skip_ws(i) return True, i + 1 - while self.check_token(i, types + whitespaces + ["MULT", "BWISE_AND"]) is True: + while ( + self.check_token(i, types + whitespaces + ["MULT", "BWISE_AND"]) is True + ): i += 1 - i = self.skip_misc_specifier(i, nl=nl) - return True, i + tmp = self.skip_misc_specifier(i, nl=nl) + if tmp == i: + return True, i - 1 + else: + return True, tmp def check_identifier(self, pos, nl=False): """ - Determines the function return value or the variable type and returns - an iterator to the next token + Determines the function return value or the variable type and returns + an iterator to the next token """ i = pos p = 0 @@ -379,10 +482,9 @@ def check_identifier(self, pos, nl=False): while self.check_token(i, whitespaces + ["MULT", "LPARENTHESIS"]) is True: if self.check_token(i, "LPARENTHESIS"): p += 1 - if self.check_token(i, "MULT") and self.check_token(i+1, "CONST"): + if self.check_token(i, "MULT") and self.check_token(i + 1, "CONST"): i += 1 i += 1 - i = self.skip_misc_specifier(i, nl=nl) if self.check_token(i, "IDENTIFIER"): while p and self.check_token(i, whitespaces + ["RPARENTHESIS"]) is True: @@ -394,21 +496,30 @@ def check_identifier(self, pos, nl=False): def is_glued_operator(self, pos): """ - Returns True if operator (among +-) at given pos is glued to identifier, number - or constant + Returns True if operator (among +-) at given pos is glued to identifier, number + or constant """ glued = [ - 'LPARENTHESIS', - 'LBRACKET', - 'LBRACE', + "LPARENTHESIS", + "LBRACKET", + "LBRACE", ] glued = glued + glued_operators start = pos - if self.check_token(pos, ['PLUS', 'MINUS', 'BWISE_OR', 'BWISE_AND', 'BWISE_NOT', 'BWISE_XOR']) is False: + if ( + self.check_token( + pos, + ["PLUS", "MINUS", "BWISE_OR", "BWISE_AND", "BWISE_NOT", "BWISE_XOR"], + ) + is False + ): return False pos += 1 pos = self.skip_ws(pos, nl=False) - if self.check_token(pos, ['IDENTIFIER', 'CONSTANT']) is False: + if ( + self.check_token(pos, ["IDENTIFIER", "CONSTANT", "MULT", "BWISE_AND"]) + is False + ): return False pos = start - 1 while (self.check_token(pos, ["SPACE", "TAB"])) is True: @@ -419,20 +530,23 @@ def is_glued_operator(self, pos): def is_operator(self, pos): """ - Returns True if the given operator (among '*&') is an actual operator, - and returns False if said operator is a pointer/adress indicator + Returns True if the given operator (among '*&') is an actual operator, + and returns False if said operator is a pointer/adress indicator """ - i = 0 start = pos + 1 pos -= 1 - if self.history[-1] == "IsFuncPrototype" or self.history[-1] == "IsFuncDeclaration": + if ( + self.history[-1] == "IsFuncPrototype" + or self.history[-1] == "IsFuncDeclaration" + ): return False if self.check_token(start, ["RPARENTHESIS", "MULT"]) is True: return False start = self.skip_ws(start, nl=False) if self.check_token(start, ["SIZEOF"]) is True: return True - if self.history[-1] == 'IsVarDeclaration': + if self.history[-1] == "IsVarDeclaration": + bracketed = False tmp = pos right_side = False while tmp > 0: @@ -440,33 +554,68 @@ def is_operator(self, pos): tmp = self.skip_nest_reverse(tmp) - 1 if self.check_token(tmp, ["ASSIGN"]) is True: right_side = True + if self.check_token(tmp, "LBRACKET") is True: + 
bracketed = True tmp -= 1 - if right_side == False: + if right_side is False and bracketed is False: return False skip = 0 + value_before = False while pos > 0: if self.check_token(pos, ["RBRACKET", "RPARENTHESIS"]) is True: + value_before = True pos = self.skip_nest_reverse(pos) - 1 - if self.parenthesis_contain(pos + 1)[0] == 'cast': + if ( + self.check_token(pos + 1, "LPARENTHESIS") is True + and self.parenthesis_contain(pos + 1)[0] == "variable" + ): + return True + if ( + self.check_token(pos + 1, "LPARENTHESIS") is True + and self.parenthesis_contain(pos + 1)[0] == "cast" + ): return False skip = 1 - if self.check_token(pos, ["IDENTIFIER", "CONSTANT", "SIZEOF"]) is True: - if self.check_token(pos, "IDENTIFIER") is True and self.check_token(pos + 1, "TAB") is True: + if ( + self.check_token( + pos, ["IDENTIFIER", "CONSTANT", "SIZEOF", "CHAR_CONST"] + ) + is True + ): + if ( + self.check_token(pos, "IDENTIFIER") is True + and self.check_token(pos + 1, "TAB") is True + ): return False return True - if self.check_token(pos, ["COMMA", "LPARENTHESIS"] + operators) is True and skip == 1: + if ( + self.check_token(pos, ["COMMA", "LPARENTHESIS", "LBRACKET"] + operators) + is True + and skip == 1 + and self.parenthesis_contain(pos + 1)[0] != "cast" + ): return True - if self.check_token(pos, ["LBRACKET", "LPARENTHESIS", "MULT", "BWISE_AND", "COMMA"] + operators + types): + if self.check_token( + pos, + ["LBRACKET", "LPARENTHESIS", "MULT", "BWISE_AND", "COMMA"] + + operators + + types, + ): return False pos -= 1 - return False + if value_before is True: + return True + else: + return False def parenthesis_contain(self, i, ret_store=None): """ - Explore parenthesis to return its content - Function, pointer, cast, or other - Uses basic string as return value and skips to the end of the parenthesis nest + Explore parenthesis to return its content + Function, pointer, cast, or other + Uses basic string as return value and skips to the end of the parenthesis nest """ + if self.check_token(i, "LPARENTHESIS") is False: + return None, i start = i ws = ["SPACE", "TAB", "NEWLINE"] i += 1 @@ -474,24 +623,40 @@ def parenthesis_contain(self, i, ret_store=None): nested_id = False identifier = None pointer = None - while (deep > 0): + sizeof = False + id_only = True + if self.check_token(start - 1, "SIZEOF") is True: + sizeof = True + i = self.skip_ws(i) + while deep > 0 and self.peek_token(i) is not None: + # print (self.peek_token(i), deep, identifier, self.check_token(i, "NULL")) if self.check_token(i, "RPARENTHESIS"): deep -= 1 elif self.check_token(i, "LPARENTHESIS"): deep += 1 - if identifier is not None and deep >= 0: - return "pointer", self.skip_nest(start) - elif self.check_token(i, "COMMA") and nested_id == True: + # if identifier is not None and deep >= 0: + # return "pointer", self.skip_nest(start) + elif ( + deep > 1 + and identifier is True + and self.check_token(i, ["NULL", "IDENTIFIER"]) + ): + return "fct_call", self.skip_nest(start) + elif self.check_token(i, "COMMA") and nested_id is True: return "function", self.skip_nest(start) + elif self.check_token(i, assigns) and deep == 1: + return "assign", self.skip_nest(start) + elif self.check_token(i, "PTR") and deep == 1: + return "variable", self.skip_nest(start) elif self.check_token(i, "COMMA"): return None, self.skip_nest(start) elif self.check_token(i, ws): pass elif self.check_token(i, types): tmp = start - 1 - while self.check_token(tmp, ["SPACE", "TAB"]) == True: + while self.check_token(tmp, ["SPACE", "TAB"]) is True: tmp -= 1 - if 
self.check_token(tmp, "SIZEOF") == True: + if self.check_token(tmp, "SIZEOF") is True: return None, self.skip_nest(start) tmp = start + 1 while self.check_token(tmp, "RPARENTHESIS") is False: @@ -502,17 +667,37 @@ def parenthesis_contain(self, i, ret_store=None): return "cast", self.skip_nest(start) elif self.check_token(i, "IDENTIFIER"): tmp = i + 1 - if (identifier is not True and pointer == True) or ret_store is not None: + if ( + identifier is not True and pointer is True + ) or ret_store is not None: nested_id = True + if ( + identifier is not True + and self.check_token(tmp, "RPARENTHESIS") + and self.scope.name == "Function" + and deep == 1 + and pointer is None + and sizeof is False + ): + tmp = self.skip_nest(start) + 1 + tmp = self.skip_ws(tmp) + if ( + self.check_token( + tmp, ["IDENTIFIER", "CONSTANT", "MINUS", "PLUS"] + ) + is False + ): + return None, self.skip_nest(start) + return "cast", self.skip_nest(start) identifier = True tmp = self.skip_ws(tmp) - if pointer == True: + if pointer is True: if self.check_token(tmp, "LBRACKET"): tmp = self.skip_nest(tmp) tmp += 1 while self.check_token(tmp, "RPARENTHESIS"): tmp += 1 - #start = tmp + # start = tmp tmp = self.skip_ws(tmp) if self.check_token(tmp, "LPARENTHESIS"): return "pointer", self.skip_nest(start) @@ -521,14 +706,16 @@ def parenthesis_contain(self, i, ret_store=None): elif self.check_token(i, ["MULT", "BWISE_AND"]): tmp = i + 1 pointer = True - if identifier != None: + if identifier is not None: tmp = start - 1 - while self.check_token(tmp, ["SPACE", "TAB"]) == True: + while self.check_token(tmp, ["SPACE", "TAB"]) is True: tmp -= 1 - if self.check_token(tmp, "SIZEOF") == True: + if self.check_token(tmp, "SIZEOF") is True: return None, self.skip_nest(start) tmp = self.skip_ws(i + 1) if self.check_token(tmp, "RPARENTHESIS") is True: return "cast", self.skip_nest(start) i += 1 + if identifier is True and id_only is True: + return "var", self.skip_nest(start) return None, self.skip_nest(start) diff --git a/norminette/errors.py b/norminette/errors.py new file mode 100644 index 00000000..5134c83a --- /dev/null +++ b/norminette/errors.py @@ -0,0 +1,227 @@ +from __future__ import annotations + +import os +import json +from dataclasses import dataclass, field, asdict +from typing import ( + TYPE_CHECKING, + Sequence, + Union, + Literal, + Optional, + List, + overload, + Any, + Type, +) + +from norminette.colors import error_color +from norminette.norm_error import errors as errors_dict + +if TYPE_CHECKING: + from norminette.lexer import Token + from norminette.file import File + +ErrorLevel = Literal["Error", "Notice"] + + +@dataclass +class Highlight: + lineno: int + column: int + length: Optional[int] = field(default=None) + hint: Optional[str] = field(default=None) + + @classmethod + def from_token( + cls, + token: Token, + *, + hint: Optional[str] = None, + ) -> Highlight: + return cls(token.lineno, token.column, token.unsafe_length, hint) + + def __lt__(self, other: Any) -> bool: + assert isinstance(other, Highlight) + if self.lineno == other.lineno: + if self.column == other.column: + return len(self.hint or '') > len(other.hint or '') + return self.column > other.column + return self.lineno > other.lineno + + +@dataclass +class Error: + name: str + text: str + level: ErrorLevel = field(default="Error") + highlights: List[Highlight] = field(default_factory=list) + + @classmethod + def from_name(cls: Type[Error], /, name: str, **kwargs) -> Error: + return cls(name, errors_dict[name], **kwargs) + + def __lt__(self, other: 
Any) -> bool: + assert isinstance(other, Error) + if not self.highlights: + return bool(other.highlights) or self.name > other.name + if not other.highlights: + return bool(self.highlights) or other.name > self.name + ah, bh = min(self.highlights), min(other.highlights) + if ah.column == bh.column and ah.lineno == bh.lineno: + return self.name < other.name + return (ah.lineno, ah.column) < (bh.lineno, bh.column) + + @overload + def add_highlight( + self, + lineno: int, + column: int, + length: Optional[int] = None, + hint: Optional[str] = None, + ) -> None: ... + @overload + def add_highlight(self, highlight: Highlight, /) -> None: ... + + def add_highlight(self, *args, **kwargs) -> None: + if len(args) == 1: + highlight, = args + else: + highlight = Highlight(*args, **kwargs) + self.highlights.append(highlight) + + +class Errors: + __slots__ = "_inner" + + def __init__(self) -> None: + self._inner: List[Error] = [] + + def __repr__(self) -> str: + return repr(self._inner) + + def __len__(self) -> int: + return len(self._inner) + + def __iter__(self): + self._inner.sort() + return iter(self._inner) + + @overload + def add(self, error: Error) -> None: + """Add an `Error` instance to the errors. + """ + ... + + @overload + def add(self, name: str, *, level: ErrorLevel = "Error", highlights: List[Highlight] = ...) -> None: + """Builds an `Error` instance from a name in `errors_dict` and adds it to the errors. + + ```python + >>> errors.add("TOO_MANY_LINES") + >>> errors.add("INVALID_HEADER") + >>> errors.add("GLOBAL_VAR_DETECTED", level="Notice") + ``` + """ + ... + + @overload + def add( + self, + /, + name: str, + text: str, + *, + level: ErrorLevel = "Error", + highlights: List[Highlight] = ..., + ) -> None: + """Builds an `Error` instance and adds it to the errors. + + ```python + >>> errors.add("BAD_IDENTATION", "You forgot an column here") + >>> errors.add("CUSTOM_ERROR", f"name {not_defined!r} is not defined. Did you mean: {levenshtein_distance}?") + >>> errors.add("NOOP", "Empty if statement", level="Notice") + ``` + """ + ... + + def add(self, *args, **kwargs) -> None: + kwargs.setdefault("level", "Error") + error = None + if len(args) == 1: + error = args[0] + if isinstance(error, str): + error = Error.from_name(error, **kwargs) + if len(args) == 2: + error = Error(*args, **kwargs) + assert isinstance(error, Error), "bad function call" + return self._inner.append(error) + + @property + def status(self) -> Literal["OK", "Error"]: + return "OK" if all(it.level == "Notice" for it in self._inner) else "Error" + + def append(self, *args, **kwargs): + """Deprecated alias for `.add(...)`, kept for backward compatibility. + + Use `.add(...)` instead. + """ + return self.add(*args, **kwargs) + + +class _formatter: + name: str + + def __init__(self, files: Union[File, Sequence[File]], **options) -> None: + if not isinstance(files, Sequence): + files = [files] + self.files = files + self.options = options + + def __init_subclass__(cls) -> None: + cls.name = cls.__name__.rstrip("ErrorsFormatter").lower() + + +class HumanizedErrorsFormatter(_formatter): + @property + def use_colors(self) -> bool: + return self.options.get("use_colors", True) + + def _colorize_error_text(self, error: Error) -> str: + color = error_color(error.name) + if not self.use_colors or not color: + return error.text + return f"\x1b[{color}m{error.text}\x1b[0m" + + def __str__(self) -> str: + output = '' + for file in self.files: + output += f"{file.basename}: {file.errors.status}!" 
+ for error in file.errors: + highlight = error.highlights[0] + error_text = self._colorize_error_text(error) + output += f"\n{error.level}: {error.name:<20} " + output += f"(line: {highlight.lineno:>3}, col: {highlight.column:>3}):\t{error_text}" + output += '\n' + return output + + +class JSONErrorsFormatter(_formatter): + def __str__(self): + files = [] + for file in self.files: + files.append({ + "path": os.path.abspath(file.path), + "status": file.errors.status, + "errors": tuple(map(asdict, file.errors)), + }) + output = { + "files": files, + } + return json.dumps(output, separators=(',', ':')) + '\n' + + +formatters = ( + JSONErrorsFormatter, + HumanizedErrorsFormatter, +) diff --git a/norminette/exceptions.py b/norminette/exceptions.py index c45239d0..a90ff597 100644 --- a/norminette/exceptions.py +++ b/norminette/exceptions.py @@ -1,4 +1,8 @@ -class CParsingError(Exception): +class NorminetteError(Exception): + pass + + +class CParsingError(NorminetteError): def __init__(self, errmsg): self.msg = errmsg @@ -7,3 +11,12 @@ def __str__(self): def __repr__(self): return self.__str__ + + +class MaybeInfiniteLoop(NorminetteError): + def __init__(self) -> None: + super().__init__("The maximum number of iterations a loop can have has been reached") + + +class UnexpectedEOF(NorminetteError): + pass diff --git a/norminette/file.py b/norminette/file.py new file mode 100644 index 00000000..4f682946 --- /dev/null +++ b/norminette/file.py @@ -0,0 +1,24 @@ +import os +from typing import Optional + +from norminette.errors import Errors + + +class File: + def __init__(self, path: str, source: Optional[str] = None) -> None: + self.path = path + self._source = source + + self.errors = Errors() + self.basename = os.path.basename(path) + self.name, self.type = os.path.splitext(self.basename) + + @property + def source(self) -> str: + if self._source is None: + with open(self.path) as file: + self._source = file.read() + return self._source + + def __repr__(self) -> str: + return f"" diff --git a/norminette/i18n.py b/norminette/i18n.py new file mode 100644 index 00000000..17bf88c2 --- /dev/null +++ b/norminette/i18n.py @@ -0,0 +1,263 @@ +import os +import subprocess +import sys +from pathlib import Path +from importlib.metadata import version +from typing import List + + +__all__ = ( + "set_locale", + "get_env_locale", + "_", +) + +LOCALES = ( + "en_US", + "pt_BR", +) + +LOCALE_DIR = Path(__file__).parent / "locale" + +DOMAIN = "norminette" + +# Default fallback +_ = lambda _: _ # noqa: E731 + + +def set_locale(locale: str) -> None: + """ + Set the locale for the application. + This function loads the translation files from the locale directory and sets the translation function. + """ + print(f"Setting locale to {locale}") + global _ + try: + import gettext + + translation = gettext.translation( + DOMAIN, + localedir=str(LOCALE_DIR), + languages=[locale], + fallback=True, + ) + _ = translation.gettext + except ImportError: + raise + + +def get_env_locale(default: str = "en_US") -> str: + """ + Get the locale from the environment. + This function returns the locale based on the LANGUAGE environment variable. + """ + keys = ( + "NORMINETTE_LOCALE", + "LOCALE", + ) + for key in keys: + locale = os.environ.get(key) + if locale: + return locale.split(":")[0] + return default + + +def _get_pot_file_path() -> Path: + """ + Get the path to the .pot file. + This function returns the path to the .pot file in the locale directory. 
+ """ + return LOCALE_DIR / f"{DOMAIN}.pot" + + +def _collect_python_files(root_dir: Path) -> List[Path]: + """ + Collect all Python source files from the given root directory. + """ + return [file for file in root_dir.rglob("norminette/**/*.py")] + + +def _create_pot_file() -> None: + """ + Create the .pot file by extracting translatable strings from Python source files. + """ + root_dir = Path(__file__).parent.parent + source_files = _collect_python_files(root_dir) + + try: + result = subprocess.run( + [ + "xgettext", + "-o", + str(_get_pot_file_path()), + "--from-code=UTF-8", + "--keyword=_", + ] + [str(file.relative_to(root_dir)) for file in source_files], + check=True, + capture_output=True, + text=True, + ) + if result.returncode == 0: + _update_pot_header(_get_pot_file_path()) + print("Successfully created .pot file.") + else: + print(f"xgettext exited with code {result.returncode}.") + print(f"Error output: {result.stderr}") + sys.exit(1) + except subprocess.CalledProcessError as e: + print(f"Error while running xgettext: {e}") + print(f"Output: {e.output}") + sys.exit(1) + + +def _create_or_update_po_files() -> None: + """ + Create or update .po files for each locale. + This function creates or updates .po files for each locale defined in the LOCALES list + using the .pot file as a template and placing them in the correct locale directory. + """ + pot_file = _get_pot_file_path() + if not pot_file.exists(): + print("Error: .pot file not found. Run _create_pot_file() first.") + sys.exit(1) + + for locale in LOCALES: + locale_path = LOCALE_DIR / locale / "LC_MESSAGES" + locale_path.mkdir(parents=True, exist_ok=True) + + po_file = locale_path / f"{DOMAIN}.po" + + if po_file.exists(): + try: + # Merge existing .po file with the updated .pot file + result = subprocess.run( + ["msgmerge", "--update", "--backup=none", str(po_file), str(pot_file)], + check=True, + text=True, + stderr=subprocess.STDOUT, + ) + if result.returncode == 0: + print(f"Successfully updated {locale} .po file") + _update_po_header(po_file) + else: + print(f"Error updating {locale} .po file:") + print(result.stdout) + print(result.stderr) + sys.exit(1) + except subprocess.CalledProcessError as e: + print(f"Error while updating {locale} .po file: {e}") + print(f"Output: {e.output}") + sys.exit(1) + else: + try: + # Initialize .po file from .pot template + result = subprocess.run( + ["msginit", "--no-translator", + "--input", str(pot_file), + "--output-file", str(po_file), + "--locale", locale], + check=True, + text=True, + stderr=subprocess.STDOUT, # Redirect stderr to stdout + ) + if result.returncode == 0: + print(f"Successfully created {locale} .po file") + _update_po_header(po_file) + else: + print(f"Error creating {locale} .po file.") + print(result.stderr) + print(result.stdout) + sys.exit(1) + except subprocess.CalledProcessError as e: + print(f"Error while creating {locale} .po file: {e}") + print(f"Output: {e.output}") + sys.exit(1) + + +def _update_pot_header(pot_file: Path) -> None: + """ + Update the header of the .pot file to set the charset to UTF-8 and update other metadata. 
+ """ + project_version = version("norminette") + try: + with pot_file.open("r", encoding="utf-8") as f: + lines = f.readlines() + + with pot_file.open("w", encoding="utf-8") as f: + for line in lines: + if "Content-Type:" in line: + f.write('"Content-Type: text/plain; charset=UTF-8\\n"\n') + elif "Project-Id-Version:" in line: + f.write(f'"Project-Id-Version: {project_version}\\n"\n') + else: + f.write(line) + + print(f"Updated header of {pot_file} with charset=UTF-8 and version={project_version}.") + except Exception as e: + print(f"Error while updating .pot header: {e}") + sys.exit(1) + + +def _update_po_header(po_file: Path) -> None: + """ + Update the header of the .po file to set the project version and other metadata. + """ + project_version = version("norminette") + try: + with po_file.open("r", encoding="utf-8") as f: + lines = f.readlines() + + with po_file.open("w", encoding="utf-8") as f: + for line in lines: + if "Project-Id-Version:" in line: + f.write(f'"Project-Id-Version: {project_version}\\n"\n') + else: + f.write(line) + + print(f"Updated header of {po_file} with version={project_version}.") + except Exception as e: + print(f"Error while updating .po header: {e}") + sys.exit(1) + + +def _compile_mo_files() -> None: + """ + Compile .po files into .mo files for each locale. + This function compiles the .po files into .mo files, which are used by gettext for translations. + """ + for locale in LOCALES: + po_file = LOCALE_DIR / locale / "LC_MESSAGES" / f"{DOMAIN}.po" + mo_file = LOCALE_DIR / locale / "LC_MESSAGES" / f"{DOMAIN}.mo" + + if po_file.exists(): + try: + result = subprocess.run( + ["msgfmt", str(po_file), "-o", str(mo_file)], + check=True, + capture_output=True, + text=True, + ) + if result.returncode == 0: + print(f"Successfully compiled {locale} .mo file") + else: + print(f"Error compiling {locale} .mo file: {result.stderr}") + sys.exit(1) + except subprocess.CalledProcessError as e: + print(f"Error while compiling {locale} .mo file: {e}") + print(f"Output: {e.output}") + sys.exit(1) + else: + print(f"Warning: .po file for {locale} does not exist. 
Skipping compilation.") + + +set_locale(get_env_locale()) + + +if __name__ == "__main__": + try: + _create_pot_file() + _create_or_update_po_files() + _compile_mo_files() + except Exception as e: + print(f"Unexpected error: {e}") + sys.exit(1) diff --git a/norminette/lexer/__init__.py b/norminette/lexer/__init__.py index 91156a86..1d14758c 100644 --- a/norminette/lexer/__init__.py +++ b/norminette/lexer/__init__.py @@ -1,2 +1,4 @@ -from .tokens import Token -from .lexer import Lexer, TokenError +from norminette.lexer.lexer import Lexer +from norminette.lexer.tokens import Token + +__all__ = ["Lexer", "Token"] diff --git a/norminette/lexer/dictionary.py b/norminette/lexer/dictionary.py index 4698f0a4..a971e125 100644 --- a/norminette/lexer/dictionary.py +++ b/norminette/lexer/dictionary.py @@ -1,58 +1,62 @@ """ Dictionary that correlates lexeme with token """ -keywords = { - # C reserved keywords # - 'auto': "AUTO", - 'break': "BREAK", - 'case': "CASE", - 'char': "CHAR", - 'const': "CONST", - 'continue': "CONTINUE", - 'default': "DEFAULT", - 'do': "DO", - 'double': "DOUBLE", - 'else': "ELSE", - 'enum': "ENUM", - 'extern': "EXTERN", - 'float': "FLOAT", - 'for': "FOR", - 'goto': "GOTO", - 'if': "IF", - 'int': "INT", - 'long': "LONG", - 'register': "REGISTER", - 'return': "RETURN", - 'short': "SHORT", - 'signed': "SIGNED", - 'sizeof': "SIZEOF", - 'static': "STATIC", - 'struct': "STRUCT", - 'switch': "SWITCH", - 'typedef': "TYPEDEF", - 'union': "UNION", - 'unsigned': "UNSIGNED", - 'void': "VOID", - 'volatile': "VOLATILE", - 'while': "WHILE", - 'inline': "INLINE", - 'NULL': "NULL", - 'restrict': "RESTRICT" +trigraphs = { + "??<": '{', + "??>": '}', + "??(": '[', + "??)": ']', + "??=": '#', + "??/": '\\', + "??'": '^', + "??!": '|', + "??-": '~', } -preproc_keywords = { - 'define': "DEFINE", - 'error': "ERROR", - 'endif': "ENDIF", - 'elif': "ELIF", - 'ifdef': "IFDEF", - 'ifndef': "IFNDEF", - 'if': "#IF", - 'else': "#ELSE", - 'include': "INCLUDE", - 'pragma': "PRAGMA", - 'undef': "UNDEF", - 'warning': 'WARNING', - 'import': 'IMPORT' +digraphs = { + "<%": '{', + "%>": '}', + "<:": '[', + ":>": ']', + "%:": '#', +} + +keywords = { + # C reserved keywords # + "auto": "AUTO", + "break": "BREAK", + "case": "CASE", + "char": "CHAR", + "const": "CONST", + "continue": "CONTINUE", + "default": "DEFAULT", + "do": "DO", + "double": "DOUBLE", + "else": "ELSE", + "enum": "ENUM", + "extern": "EXTERN", + "float": "FLOAT", + "for": "FOR", + "goto": "GOTO", + "if": "IF", + "int": "INT", + "long": "LONG", + "register": "REGISTER", + "return": "RETURN", + "short": "SHORT", + "signed": "SIGNED", + "sizeof": "SIZEOF", + "static": "STATIC", + "struct": "STRUCT", + "switch": "SWITCH", + "typedef": "TYPEDEF", + "union": "UNION", + "unsigned": "UNSIGNED", + "void": "VOID", + "volatile": "VOLATILE", + "while": "WHILE", + "inline": "INLINE", + "NULL": "NULL", + "restrict": "RESTRICT", } """ @@ -64,53 +68,56 @@ """ operators = { - '>>=': "RIGHT_ASSIGN", - '<<=': "LEFT_ASSIGN", - '+=': "ADD_ASSIGN", - '-=': "SUB_ASSIGN", - '*=': "MUL_ASSIGN", - '/=': "DIV_ASSIGN", - '%=': "MOD_ASSIGN", - '&=': "AND_ASSIGN", - '^=': "XOR_ASSIGN", - '|=': "OR_ASSIGN", - '<=': "LESS_OR_EQUAL", - '>=': "GREATER_OR_EQUAL", - '==': "EQUALS", - '!=': "NOT_EQUAL", - '=': "ASSIGN", - ';': "SEMI_COLON", - ':': "COLON", - ',': "COMMA", - '.': "DOT", - '!': "NOT", - '-': "MINUS", - '+': "PLUS", - '*': "MULT", - '/': "DIV", - '%': "MODULO", - '<': "LESS_THAN", - '>': "MORE_THAN", - '...': "ELLIPSIS", - '++': "INC", - '--': "DEC", - '->': "PTR", - '&&': 
"AND", - '||': "OR", - '^': "BWISE_XOR", - '|': "BWISE_OR", - '~': "BWISE_NOT", - '&': "BWISE_AND", - '>>': "RIGHT_SHIFT", - '<<': "LEFT_SHIFT", - '?': "TERN_CONDITION" + ">>=": "RIGHT_ASSIGN", + "<<=": "LEFT_ASSIGN", + "+=": "ADD_ASSIGN", + "-=": "SUB_ASSIGN", + "*=": "MUL_ASSIGN", + "/=": "DIV_ASSIGN", + "%=": "MOD_ASSIGN", + "&=": "AND_ASSIGN", + "^=": "XOR_ASSIGN", + "|=": "OR_ASSIGN", + "<=": "LESS_OR_EQUAL", + ">=": "GREATER_OR_EQUAL", + "==": "EQUALS", + "!=": "NOT_EQUAL", + "=": "ASSIGN", + ";": "SEMI_COLON", + ":": "COLON", + ",": "COMMA", + ".": "DOT", + "!": "NOT", + "-": "MINUS", + "+": "PLUS", + "*": "MULT", + "/": "DIV", + "%": "MODULO", + "<": "LESS_THAN", + ">": "MORE_THAN", + "...": "ELLIPSIS", + "++": "INC", + "--": "DEC", + "->": "PTR", + "&&": "AND", + "||": "OR", + "^": "BWISE_XOR", + "|": "BWISE_OR", + "~": "BWISE_NOT", + "&": "BWISE_AND", + ">>": "RIGHT_SHIFT", + "<<": "LEFT_SHIFT", + "?": "TERN_CONDITION", + "#": "HASH", } brackets = { - '{': "LBRACE", - '}': "RBRACE", - '(': "LPARENTHESIS", - ')': "RPARENTHESIS", - '[': "LBRACKET", - ']': "RBRACKET" + "{": "LBRACE", + "}": "RBRACE", + "(": "LPARENTHESIS", + ")": "RPARENTHESIS", + "[": "LBRACKET", + "]": "RBRACKET", } + +__all__ = ["brackets", "operators", "keywords"] diff --git a/norminette/lexer/lexer.py b/norminette/lexer/lexer.py index e86bb1f9..a3c79e1f 100644 --- a/norminette/lexer/lexer.py +++ b/norminette/lexer/lexer.py @@ -1,483 +1,537 @@ import re import string -from lexer.tokens import Token -from lexer.dictionary import keywords, preproc_keywords, operators, brackets - - -def read_file(filename): - with open(filename) as f: - return f.read() - - -class TokenError(Exception): - def __init__(self, pos): - self.msg = f"Unrecognized token line {pos[0]}, col {pos[1]}" - - def __repr__(self): - return self.msg +from typing import Optional, Tuple, cast + +from norminette.exceptions import UnexpectedEOF, MaybeInfiniteLoop +from norminette.lexer.dictionary import digraphs, trigraphs +from norminette.lexer.dictionary import brackets +from norminette.lexer.dictionary import keywords +from norminette.lexer.dictionary import operators +from norminette.lexer.tokens import Token +from norminette.file import File +from norminette.errors import Error, Highlight as H + + +def c(a: str, b: str): + a = a.lower() + b = b.lower() + return ( + a + b, a.upper() + b, a + b.upper(), a.upper() + b.upper(), + b + a, b.upper() + a, b + a.upper(), b.upper() + a.upper(), + ) + + +quote_prefixes = (*"lLuU", "u8") +octal_digits = "01234567" +hexadecimal_digits = "0123456789abcdefABCDEF" +integer_suffixes = ( + '', + *"uUlLzZ", + "ll", "LL", + "wb", "WB", + "i64", "I64", + *c('u', 'l'), + *c('u', "ll"), + *c('u', 'z'), + *c('u', "wb"), + *c('u', "i64"), +) +float_suffixes = ( + '', + *"lLfFdD", + "dd", "DD", + "df", "DF", + "dl", "DL", + *c('f', 'i'), + *c('f', 'j'), +) + +INT_LITERAL_PATTERN = re.compile(r""" +^ +# (?P[-+]*) +(?P # prefix can be + 0[bBxX]* # 0, 0b, 0B, 0x, 0X, 0bb, 0BB, ... + | # or empty +) +(?P + # BUG If prefix is followed by two or more x, it doesn't works correctly + (?<=0[xX]) # is prefix for hex digits? + [\da-fA-F]+ # so, collect hex digits + | # otherwise + \d+ # collect decimal digits +) +(?P + (?<=[eE]) # is constant ending with an `E`? 
+ [\w\d+\-.]* # so, collect `+` and `-` operators + | # otherwise + \w # collect suffixes that starts with an letter + [\w\d.]* # and letters, digits and dots that follows it + | # finally, do suffix be optional (empty) +) +""", re.VERBOSE) + + +def _float_pattern(const: str, digit: str, exponent: Tuple[str, str]): + pattern = r""" + ^ + (?P{0}) + (?P + (?: + [{2}]+[-+]{3}+ + |[{2}]+{3}+ + |(?:[{2}][+-]?(?:[.{3}]+)?)+ + ){1} + ) + (?P[\w\d._]*|) + """.format(const, *exponent, digit) + return re.compile(pattern, re.VERBOSE) + + +FLOAT_EXPONENT_LITERAL_PATTERN = _float_pattern(r"\d+", digit=r"\d", exponent=('', "eE")) +FLOAT_FRACTIONAL_LITERAL_PATTERN = _float_pattern(r"(?:\d+)?\.\d+|\d+\.", digit=r"\d", exponent=('?', "eE")) +FLOAT_HEXADECIMAL_LITERAL_PATTERN = _float_pattern(r"0[xX]+[\da-fA-F]+(?:\.[\da-fA-F]+)?", + digit=r"[\da-fA-F]", exponent=('?', "pP")) class Lexer: - def __init__(self, source_code, starting_line=1): - self.src = source_code - self.len = len(source_code) - self.__char = self.src[0] if self.src != "" else None + def __init__(self, file: File): + self.file = file + self.__pos = int(0) - self.__line_pos = int(starting_line) - self.__line = int(starting_line) - self.tokens = [] - - def peek_sub_string(self, size): - return self.src[self.__pos: self.__pos + size] - - def peek_char(self): - """ Return current character being checked, - if the character is a backslash character the following - character is appended to the return value. It will allow us to - parse escaped characters easier. - """ - if self.__pos < self.len: - if self.src[self.__pos] == '\\': - self.__char = self.src[self.__pos:self.__pos + 2] - else: - self.__char = self.src[self.__pos] - else: - self.__char = None - return self.__char + self.__line_pos = self.__line = 1 - def pop_char(self): - """ Pop a character that's been read by increasing self.__pos, - for escaped characters self.__pos will be increased twice - """ - if self.peek_char() == "\t": - # this calculates the 'visual offset' of a tab based on it's - # position on the line, if there's an easier way to calculate this - # you're welcome - self.__line_pos = int(( - self.__line_pos + 4 - - (self.__line_pos - 1) % 4) * 5 / 5) - else: - self.__line_pos += 1 - if self.__pos < self.len and self.src[self.__pos] == '\\': - self.__pos += 1 - self.__pos += 1 - return self.peek_char() + def raw_peek(self, *, offset: int = 0, collect: int = 1): + if (pos := self.__pos + offset) < len(self.file.source): + return ''.join(self.file.source[pos:pos+collect]) + return None - def peek_token(self): - return self.tokens[-1] + def peek(self, *, times: int = 1, offset: int = 0) -> Optional[Tuple[str, int]]: + char, size = '', 0 + for _ in range(times): + if (trigraph := self.raw_peek(offset=offset + size, collect=3)) in trigraphs: + char += trigraphs[trigraph] + size += 3 + elif (digraph := self.raw_peek(offset=offset + size, collect=2)) in digraphs: + char += digraphs[digraph] + size += 2 + elif word := self.raw_peek(offset=offset + size): + char += word + size += 1 + else: + break + if size: + return char, size + return None # Let it crash :D + + def pop( + self, + *, + times: int = 1, + use_spaces: bool = False, + use_escape: bool = False, + ) -> str: + result = "" + for _ in range(times): + for _ in range(100): + if peek := self.peek(): + char, size = peek + else: + raise UnexpectedEOF() + if char != '\\': + break + peek = self.peek(offset=size) + if peek is None: + break + temp, _ = peek # Don't change the `temp` to `char` + if temp != '\n': + if use_escape: + if 
temp in r"abefnrtv\"'?": + size += 1 + char += temp + elif temp == 'x': + size += 1 + char += temp + # BUG It is just considering one `byte` (0x0 to 0xFF), so it not works correctly + # with prefixed strings like `L"\0x1234"`. + peek = self.raw_peek(offset=size, collect=2) + if peek is None or peek[0] not in hexadecimal_digits: + error = Error.from_name("NO_HEX_DIGITS", level="Notice") + error.add_highlight(self.__line, self.__line_pos + size - 1, length=1) + self.file.errors.add(error) + else: + for digit in peek: + if digit not in hexadecimal_digits: + break + size += 1 + char += digit + elif temp in octal_digits: + while (temp := self.raw_peek(offset=size)) and temp in octal_digits: + size += 1 + char += temp + else: + error = Error.from_name("UNKNOWN_ESCAPE", level="Notice") + error.add_highlight(self.__line, self.__line_pos + size, length=1) + self.file.errors.add(error) + char += temp + size += 1 + break + self.__pos += size + 1 + self.__line += 1 + self.__line_pos = 0 + peek = self.peek() + if peek is None: + raise UnexpectedEOF() + char, size = peek + else: + # It hits when we have multiple lines followed by `\`, e.g: + # ```c + # // hello \ + # a \ + # b \ + # c\ + # \ + # a + # ``` + raise MaybeInfiniteLoop() + if char == '\n': + self.__line_pos = 0 + self.__line += 1 + if char == '\t': + self.__line_pos += (spaces := 4 - (self.__line_pos - 1) % 4) - 1 + if use_spaces: + char = ' ' * spaces + self.__line_pos += size + self.__pos += size + result += char + return result def line_pos(self): return self.__line, self.__line_pos - def is_string(self): - """ True if current character could start a string constant - """ - if self.peek_sub_string(2) == 'L"' or self.peek_char() == '"': - return True - else: - return False - - def is_constant(self): - """ True if current character could start a numeric constant - """ - if self.peek_char() in string.digits: - return True - elif self.peek_char() == ".": - for i in range(0, self.len - self.__pos): - if self.src[self.__pos + i] == ".": - i += 1 - elif self.src[self.__pos + i] in "0123456789": - return True - else: - return False - else: - return False - - def is_char_constant(self): - """ True if current character could start a character constant - """ - if self.peek_char() == '\'' or self.peek_sub_string(2) == "L'": - return True + def parse_char_literal(self) -> Optional[Token]: + pos = lineno, column = self.line_pos() + value = '' + for prefix in quote_prefixes: + length = len(prefix) + result = self.raw_peek(collect=length + 1) + if not result: + return + if result.startswith(prefix) and result.endswith('\''): + value += self.pop(times=length) + break + if self.raw_peek() != '\'': + return + chars = 0 + value += self.pop() + for _ in range(100): + try: + char = self.pop(use_escape=True) + except UnexpectedEOF: + error = Error.from_name("UNEXPECTED_EOF_CHR", highlights=[ + H(lineno, column, length=len(value)), + ]) + self.file.errors.add(error) + break + if char == '\n': + error = Error.from_name("UNEXPECTED_EOL_CHR", highlights=[ + H(lineno, column, length=len(value)), + H(lineno, column + len(value), length=1, hint="Perhaps you forgot a single quote (')?") + ]) + self.file.errors.add(error) + break + value += char + if char == '\'': + break + chars += 1 else: - return False - - def string(self): + raise MaybeInfiniteLoop() + if value == "''": + error = Error.from_name("EMPTY_CHAR", highlights=[H(*pos, length=2)]) + self.file.errors.add(error) + if chars > 1 and value.endswith('\''): + error = Error.from_name("CHAR_AS_STRING", highlights=[ 
+ H(*pos, length=len(value)), + H(*pos, length=1, + hint="Perhaps you want a string (double quote, \") instead of a char (single quote, ')?"), + ]) + self.file.errors.add(error) + return Token("CHAR_CONST", pos, value=value) + + def parse_string_literal(self): """String constants can contain any characer except unescaped newlines. - An unclosed string or unescaped newline is a fatal error and thus - parsing will stop here. + An unclosed string or unescaped newline is a fatal error and thus + parsing will stop here. """ - pos = self.line_pos() - tkn_value = "" - if self.peek_char() == 'L': - tkn_value += self.peek_char() - self.pop_char() - tkn_value += self.peek_char() - self.pop_char() - while self.peek_char() not in [None]: - tkn_value += self.peek_char() - if self.peek_sub_string(2) == "\\\n": - self.__line += 1 - self.__line_pos = 1 - if self.peek_char() == '\"': + if not self.peek(): + return + pos = lineno, column = self.line_pos() + val = '' + for prefix in quote_prefixes: + length = len(prefix) + result = self.raw_peek(collect=length + 1) + if not result: + return + if result.startswith(prefix) and result.endswith('"'): + val += self.pop(times=length) + break + if self.raw_peek() != '"': + return + val += self.pop() + while self.peek() is not None: + char = self.pop(use_escape=True) + val += char + if char == '"': break - self.pop_char() else: - raise TokenError(pos) + error = Error.from_name("UNEXPECTED_EOF_STR") + error.add_highlight(*pos, length=len(val)) + error.add_highlight(lineno, column + len(val), length=1, hint="Perhaps you forgot a double quote (\")?") + self.file.errors.add(error) + return Token("STRING", pos, val) + + def parse_integer_literal(self): + # TODO Add to support single quote (') to separate digits according to C23 + + match = INT_LITERAL_PATTERN.match(self.file.source[self.__pos:]) + if match is None: return - self.tokens.append(Token("STRING", pos, tkn_value)) - self.pop_char() - def char_constant(self): - """Char constants follow pretty much the same rule as string constants - """ - pos = self.line_pos() - tkn_value = '\'' - self.pop_char() - while self.peek_char(): - tkn_value += self.peek_char() - if self.peek_char() == '\n': - self.pop_char() - self.tokens.append(Token("TKN_ERROR", pos)) - return - if self.peek_char() == '\'': - self.pop_char() - self.tokens.append(Token( - "CHAR_CONST", - pos, - tkn_value)) - return - self.pop_char() - raise TokenError(pos) + pos = lineno, column = self.line_pos() + token = Token("CONSTANT", pos, slice := self.pop(times=match.end())) - def constant(self): + if match["Suffix"] not in integer_suffixes: + suffix_length = len(match["Suffix"]) + string_length = len(slice) - suffix_length + if match["Suffix"][0] in "+-": + error = Error.from_name("MAXIMAL_MUNCH") + error.add_highlight(lineno, column + string_length, length=1, hint="Perhaps you forgot a space ( )?") + else: + error = Error.from_name("INVALID_SUFFIX") + error.add_highlight(lineno, column + string_length, length=suffix_length) + self.file.errors.add(error) + + def _check_bad_prefix(name: str, bucket: str): + error = Error.from_name(f"INVALID_{name}_INT") + for index, char in enumerate(match["Constant"], start=len(match["Prefix"])): + if char not in bucket: + error.add_highlight(lineno, column + index, length=1) + if error.highlights: + self.file.errors.add(error) + + if match["Prefix"] in ("0b", "0B"): + _check_bad_prefix("BIN", "01") + elif match["Prefix"] == '0': + _check_bad_prefix("OCT", "01234567") + elif match["Prefix"] in ("0x", "0X"): + 
_check_bad_prefix("HEX", "0123456789abcdefABCDEF") + + return token + + def parse_float_literal(self): """Numeric constants can take many forms: - - integer constants only allow digits [0-9] - - real number constant only allow digits [0-9], - ONE optionnal dot '.' and ONE optionnal 'e/E' character - - binary constant only allow digits [0-1] prefixed by '0b' or '0B' - - hex constant only allow digits [0-9], letters [a-f/A-F] prefixed - by '0x' or '0X' - - octal constants allow digits [0-9] prefixed by a zero '0' - character + - integer constants only allow digits [0-9] + - real number constant only allow digits [0-9], + ONE optionnal dot '.' and ONE optionnal 'e/E' character + - binary constant only allow digits [0-1] prefixed by '0b' or '0B' + - hex constant only allow digits [0-9], letters [a-f/A-F] prefixed + by '0x' or '0X' + - octal constants allow digits [0-9] prefixed by a zero '0' + character - Size ('l/L' for long) and sign ('u/U' for unsigned) specifiers can - be appended to any of those. tokens + Size ('l/L' for long) and sign ('u/U' for unsigned) specifiers can + be appended to any of those. tokens - Plus/minus operators ('+'/'-') can prefix any of those tokens + Plus/minus operators ('+'/'-') can prefix any of those tokens - a numeric constant could start with a '.' (dot character) + a numeric constant could start with a '.' (dot character) """ - pos = self.line_pos() - tkn_value = "" - bucket = ".0123456789aAbBcCdDeEfFlLuUxX-+" - while self.peek_char() and (self.peek_char() in bucket or self.peek_char() == "\\\n"): - if self.peek_char() in "xX": - if tkn_value.startswith("0") is False or len(tkn_value) > 1: - raise TokenError(pos) - for c in "xX": - if c in tkn_value: - raise TokenError(pos) - - elif self.peek_char() in "bB": - if tkn_value != "0" \ - and tkn_value.startswith("0x") is False \ - and tkn_value.startswith("0X") is False: - raise TokenError(pos) - - elif self.peek_char() in "+-": - if tkn_value.endswith("e") is False \ - and tkn_value.endswith("E") is False \ - or self.peek_sub_string(2) in ["++", "--"]: - break - - elif self.peek_char() in "eE" \ - and "0x" not in tkn_value and "0X" not in tkn_value: - if "e" in tkn_value or "E" in tkn_value \ - or "f" in tkn_value or "F" in tkn_value \ - or "u" in tkn_value or "U" in tkn_value \ - or "l" in tkn_value or "L" in tkn_value: - raise TokenError(pos) - - elif self.peek_char() in "lL": - lcount = tkn_value.count("l") + tkn_value.count("L") - if lcount > 1 or (lcount == 1 and tkn_value[-1] not in "lL") \ - or ("f" in tkn_value or "F" in tkn_value) \ - and "0x" not in tkn_value and "0X" not in tkn_value: - raise TokenError(pos) - elif self.peek_char() == 'l' and 'L' in tkn_value \ - or self.peek_char() == 'L' and 'l' in tkn_value: - raise TokenError(pos) - - elif self.peek_char() in "uU": - if "u" in tkn_value or "U" in tkn_value \ - or (("e" in tkn_value or "E" in tkn_value - or "f" in tkn_value or "F" in tkn_value) - and ( - "0x" not in tkn_value - and "0X" not in tkn_value)): - raise TokenError(pos) - - elif self.peek_char() in "Ff": - if tkn_value.startswith("0x") is False \ - and tkn_value.startswith("0X") is False \ - and ( - "." 
not in tkn_value - or "f" in tkn_value - or "F" in tkn_value) \ - or "u" in tkn_value or "U" in tkn_value \ - or "l" in tkn_value or "L" in tkn_value: - raise TokenError(pos) - - elif self.peek_char() in "aAbBcCdDeE" \ - and tkn_value.startswith("0x") is False \ - and tkn_value.startswith("0X") is False \ - or "u" in tkn_value or "U" in tkn_value \ - or "l" in tkn_value or "L" in tkn_value: - raise TokenError(pos) - - elif self.peek_char() in "0123456789" \ - and "u" in tkn_value or "U" in tkn_value \ - or "l" in tkn_value or "L" in tkn_value: - raise TokenError(pos) - - elif self.peek_char() == '.' and '.' in tkn_value: - raise TokenError(pos) - - tkn_value += self.peek_char() - self.pop_char() - if tkn_value[-1] in "eE" and tkn_value.startswith("0x") is False \ - or tkn_value[-1] in "xX": - raise TokenError(pos) + constant = self.raw_peek() + if constant is None: + return + pos = lineno, column = self.line_pos() + src = self.file.source[self.__pos:] + if match := FLOAT_EXPONENT_LITERAL_PATTERN.match(src): + type = "exponent" + elif match := FLOAT_FRACTIONAL_LITERAL_PATTERN.match(src): + type = "fractional" + elif match := FLOAT_HEXADECIMAL_LITERAL_PATTERN.match(src): + type = "hexadecimal" else: - self.tokens.append(Token("CONSTANT", pos, tkn_value)) - - def mult_comment(self): - pos = self.line_pos() - self.pop_char(), self.pop_char() - tkn_value = "/*" - while self.peek_char(): - if self.src[self.__pos:].startswith("*/"): - tkn_value += "*/" - self.pop_char(), self.pop_char() + return + error = None + suffix = len(match["Suffix"]) + column += len(match["Constant"]) + badhex = match["Constant"].strip(hexadecimal_digits + '.') + if type == "exponent" and not re.match(r"[eE][-+]?\d+", match["Exponent"]): + error = Error.from_name("BAD_EXPONENT") + error.add_highlight(lineno, column, length=len(match["Exponent"]) + suffix) + elif type == "hexadecimal" and '.' 
not in match["Constant"] and not match["Exponent"]: + return # Hexadecimal Integer + elif type == "hexadecimal" and badhex not in ('x', 'X'): + error = Error.from_name("MULTIPLE_X") + error.add_highlight(lineno, column - len(match["Constant"]) + 1, length=len(badhex)) + elif match["Constant"].count('.') == 1 and match["Suffix"].count('.') > 0: + error = Error.from_name("MULTIPLE_DOTS") + error.add_highlight(lineno, column, length=len(match["Exponent"]) + suffix) + elif match["Suffix"] not in float_suffixes: + error = Error.from_name("BAD_FLOAT_SUFFIX") + error.add_highlight(lineno, column + len(match["Exponent"]), length=suffix) + if error: + self.file.errors.add(error) + return Token("CONSTANT", pos, self.pop(times=match.end())) + + def parse_multi_line_comment(self) -> Optional[Token]: + if self.raw_peek(collect=2) != "/*": + return + pos = lineno, column = self.line_pos() + val = self.pop(times=2) + eof = False + while self.peek(): + try: + val += self.pop(use_spaces=True) + except UnexpectedEOF: + eof = True + break + if val.endswith("*/"): break - tkn_value += self.peek_char() - if self.peek_char() == '\n': - self.__line += 1 - self.__line_pos = 1 - self.pop_char() - if tkn_value.endswith("*/"): - self.tokens.append(Token("MULT_COMMENT", pos, tkn_value)) else: - raise TokenError(pos) - - def comment(self): + eof = True + if eof: + # TODO Add a better highlight since it is a multi-line token + error = Error.from_name("UNEXPECTED_EOF_MC") + error.add_highlight(lineno, column, length=len(val)) + self.file.errors.add(error) + return Token("MULT_COMMENT", pos, val) + + def parse_line_comment(self) -> Optional[Token]: """Comments are anything after '//' characters, up until a newline or - end of file + end of file """ + if self.raw_peek(collect=2) != "//": + return pos = self.line_pos() - tkn_value = "//" - self.pop_char(), self.pop_char() - while self.peek_char() is not None: - if self.peek_char() == '\n': - self.tokens.append(Token("COMMENT", pos, tkn_value)) - return - tkn_value += self.peek_char() - self.pop_char() - raise TokenError(pos) + val = self.pop(times=2) + while result := self.peek(): + char, _ = result + if char == '\n': + break + try: + val += self.pop() + except UnexpectedEOF: + break + return Token("COMMENT", pos, val) - def identifier(self): + def parse_identifier(self) -> Optional[Token]: """Identifiers can start with any letter [a-z][A-Z] or an underscore - and contain any letters [a-z][A-Z] digits [0-9] or underscores + and contain any letters [a-z][A-Z] digits [0-9] or underscores """ + char = self.raw_peek() + if not char or char not in string.ascii_letters + '_': + return pos = self.line_pos() - tkn_value = "" - while self.peek_char() and \ - ( - self.peek_char() in string.ascii_letters + "0123456789_" - or self.peek_char() == "\\\n"): - if self.peek_char() == "\\\n": - self.pop_char() - continue - tkn_value += self.peek_char() - self.pop_char() - if tkn_value in keywords: - self.tokens.append(Token(keywords[tkn_value], pos)) - - else: - self.tokens.append(Token("IDENTIFIER", pos, tkn_value)) + val = self.pop() + while char := self.raw_peek(): + if char not in string.ascii_letters + "0123456789_": + break + val += self.pop() + if val in keywords: + return Token(keywords[val], pos) + return Token("IDENTIFIER", pos, val) - def operator(self): + def parse_operator(self): """Operators can be made of one or more sign, so the longest operators - need to be looked up for first in order to avoid false positives - eg: '>>' being understood as two 'MORE_THAN' operators 
instead of - one 'RIGHT_SHIFT' operator + need to be looked up for first in order to avoid false positives + eg: '>>' being understood as two 'MORE_THAN' operators instead of + one 'RIGHT_SHIFT' operator """ + result = self.peek() + if not result: + return + char, _ = result + if char not in "+-*/,<>^&|!=%;:.~?#": + return pos = self.line_pos() - if self.peek_char() in ".+-*/%<>^&|!=": - - if self.peek_sub_string(3) in [">>=", "<<=", "..."]: - self.tokens.append(Token( - operators[self.peek_sub_string(3)], - pos)) - self.pop_char(), self.pop_char(), self.pop_char() - - elif self.peek_sub_string(2) in [">>", "<<", "->"]: - self.tokens.append(Token( - operators[self.peek_sub_string(2)], - pos)) - self.pop_char(), self.pop_char() - - elif self.peek_sub_string(2) == self.peek_char() + "=": - self.tokens.append(Token( - operators[self.peek_sub_string(2)], - pos)) - self.pop_char(), self.pop_char() - - elif self.peek_char() in "+-<>=&|": - if self.peek_sub_string(2) == self.peek_char() * 2: - self.tokens.append(Token( - operators[self.peek_sub_string(2)], - pos)) - self.pop_char() - self.pop_char() - - else: - self.tokens.append(Token( - operators[self.peek_char()], pos)) - self.pop_char() - - else: - self.tokens.append(Token( - operators[self.peek_char()], - pos)) - self.pop_char() - - else: - self.tokens.append(Token( - operators[self.src[self.__pos]], - pos)) - self.pop_char() - - def preprocessor(self): - pos = self.line_pos() - tkn_value = "" - while self.peek_char(): - tkn_value += self.peek_char() - self.pop_char() - if self.peek_sub_string(2) == "\\\n": - self.__line_pos = 1 - self.__line += 1 - # raise TokenError(self.line_pos()) - if self.peek_sub_string(2) in ["//", "/*"] \ - or self.peek_char() == '\n': - break - if len(tkn_value) <= 1: - raise TokenError(self.line_pos()) - tkn_key = tkn_value[1:].split()[0] - if tkn_key not in preproc_keywords and tkn_key[:len('include')] != 'include': - raise TokenError(self.line_pos()) - else: - if tkn_key not in preproc_keywords and tkn_key[:len('include')] == 'include': - tkn_key = 'include' - self.tokens.append(Token( - preproc_keywords.get(tkn_key), - pos, - tkn_value)) - - def get_next_token(self): + if char in ".+-*/%<>^&|!=": + if self.raw_peek(collect=3) in (">>=", "<<=", "..."): + return Token(operators[self.pop(times=3)], pos) + temp, _ = self.peek(times=2) # type: ignore + if temp in (">>", "<<", "->"): + return Token(operators[self.pop(times=2)], pos) + if temp == char + "=": + return Token(operators[self.pop(times=2)], pos) + if char in "+-<>=&|": + if temp == char * 2: + return Token(operators[self.pop(times=2)], pos) + char = self.pop() + return Token(operators[char], pos) + + def parse_whitespace(self) -> Optional[Token]: + char = self.raw_peek() + if char is None or char not in "\n\t ": + return + if char == ' ': + token = Token("SPACE", self.line_pos()) + elif char == "\t": + token = Token("TAB", self.line_pos()) + elif char == "\n": + token = Token("NEWLINE", self.line_pos()) + self.pop() + return token + + def parse_brackets(self) -> Optional[Token]: + result = self.peek() + if result is None: + return + char, _ = result + if char not in brackets: + return + start = self.line_pos() + value = self.pop() + return Token(brackets[value], start) + + parsers = ( + parse_float_literal, # Need to be above: + # `parse_operator` to avoid `` + # `parse_integer_literal` to avoid `\d+` + parse_integer_literal, + parse_char_literal, + parse_string_literal, + parse_identifier, # Need to be bellow `char` and `string` + parse_whitespace, + 
parse_line_comment, + parse_multi_line_comment, + parse_operator, + parse_brackets, + ) + + def get_next_token(self) -> Optional[Token]: """Peeks one character and tries to match it to a token type, - if it doesn't match any of the token types, an error will be raised - and current file's parsing will stop + if it doesn't match any of the token types, an error will be raised + and current file's parsing will stop """ - while self.peek_char() is not None: - if self.is_string(): - self.string() - - elif (self.peek_char().isalpha() and self.peek_char().isascii())or self.peek_char() == '_': - self.identifier() - - elif self.is_constant(): - self.constant() - - elif self.is_char_constant(): - self.char_constant() - - elif self.peek_char() == '#': - self.preprocessor() - - elif self.src[self.__pos:].startswith("/*"): - self.mult_comment() - - elif self.src[self.__pos:].startswith("//"): - self.comment() - - elif self.peek_char() in "+-*/,<>^&|!=%;:.~?": - self.operator() - - elif self.peek_char() == ' ': - self.tokens.append(Token("SPACE", self.line_pos())) - self.pop_char() - - elif self.peek_char() == '\t': - self.tokens.append(Token("TAB", self.line_pos())) - self.pop_char() - - elif self.peek_char() == '\n':# or ord(self.peek_char()) == 8203: - self.tokens.append(Token("NEWLINE", self.line_pos())) - self.pop_char() - self.__line_pos = 1 + while self.raw_peek(): + if self.raw_peek(collect=2) == "\\\n" or self.raw_peek(collect=4) == "??/\n": + # Avoid using `.pop()` here since it ignores the escaped + # newline and pops and upcomes after it. E.g, if we have + # `\\\nab` and use `.pop()`, the parsers funcs will see `b``. + _, size = self.peek() # type: ignore + self.__pos += cast(int, size) + 1 self.__line += 1 - - elif self.peek_char() == '\\\n': - self.tokens.append(Token("ESCAPED_NEWLINE", self.line_pos())) - self.pop_char() self.__line_pos = 1 - self.__line += 1 - - elif self.peek_char() in brackets: - self.tokens.append(Token( - brackets[self.peek_char()], - self.line_pos())) - self.pop_char() - - else: - raise TokenError(self.line_pos()) - - return self.peek_token() - - return None - - def get_tokens(self): - """Iterate through self.get_next_token() to convert source code into a - token list - """ - while self.get_next_token(): - continue - return self.tokens - - def print_tokens(self): - if self.tokens == []: - return - for t in self.tokens: - if t.type == "NEWLINE": - print(t) else: - print(t, end="") - if self.tokens[-1].type != "NEWLINE": - print("") + break + for parser in self.parsers: + if result := parser(self): + return result + if char := self.raw_peek(): + error = Error("BAD_LEXEME", f"No matchable token for '{char}' lexeme") + error.add_highlight(*self.line_pos(), length=1) + self.file.errors.add(error) + self.__pos += 1 + self.__line_pos += 1 + # BUG If we have multiples bad lexemes, it can raise RecursionError + return self.get_next_token() - def check_tokens(self): - """ - Only used for testing - """ - if self.tokens == []: - self.get_tokens() - if self.tokens == []: - return "" - ret = "" - for i in range(0, len(self.tokens)): - ret += self.tokens[i].test() - ret += "" if self.tokens[i].type != "NEWLINE" else "\n" - if self.tokens[-1].type != "NEWLINE": - ret += "\n" - return ret + def __iter__(self): + while token := self.get_next_token(): + yield token diff --git a/norminette/lexer/tokens.py b/norminette/lexer/tokens.py index cc56f03b..763d9384 100644 --- a/norminette/lexer/tokens.py +++ b/norminette/lexer/tokens.py @@ -1,24 +1,39 @@ -from lexer.dictionary import 
operators, brackets, keywords, preproc_keywords +from typing import Optional, Tuple +from dataclasses import dataclass, field +@dataclass(eq=True, repr=True) class Token: - def __init__(self, tkn_type, pos, tkn_value=None): - self.type = str(tkn_type) - self.pos = pos - if tkn_value is not None: - self.value = str(tkn_value) - self.length = len(tkn_value) - else: - self.value = None - self.length = 0 - - def __repr__(self): + type: str + pos: Tuple[int, int] + value: Optional[str] = field(default=None) + + @property + def length(self) -> int: + return len(self.value or '') + + @property + def unsafe_length(self) -> Optional[int]: + if self.value is None: + return None + return self.length + + @property + def lineno(self) -> int: + return self.pos[0] + + @property + def column(self) -> int: + return self.pos[1] + + @property + def line_column(self): + return self.pos[1] + + def __str__(self): """ Token representation for debugging, using the format or simply when value is None """ - r = f'<{self.type}={self.value}>' if self.value else f'<{self.type}>' + r = f"<{self.type}={self.value}>" if self.value else f"<{self.type}>" return r - - def test(self): - return self.__repr__() diff --git a/norminette/locale/en_US/LC_MESSAGES/norminette.po b/norminette/locale/en_US/LC_MESSAGES/norminette.po new file mode 100644 index 00000000..d621ac0a --- /dev/null +++ b/norminette/locale/en_US/LC_MESSAGES/norminette.po @@ -0,0 +1,571 @@ +# English translations for PACKAGE package. +# Copyright (C) 2025 THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# Automatically generated, 2025. +# +msgid "" +msgstr "" +"Project-Id-Version: 3.3.59\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-04-11 10:12-0300\n" +"PO-Revision-Date: 2025-04-09 17:51-0300\n" +"Last-Translator: Automatically generated\n" +"Language-Team: none\n" +"Language: en_US\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: norminette/norm_error.py:4 +msgid "Spaces at beginning of line" +msgstr "" + +#: norminette/norm_error.py:5 norminette/norm_error.py:35 +msgid "Found tab when expecting space" +msgstr "" + +#: norminette/norm_error.py:6 +msgid "Two or more consecutives spaces" +msgstr "" + +#: norminette/norm_error.py:7 +msgid "Two or more consecutives white spaces" +msgstr "" + +#: norminette/norm_error.py:8 +msgid "missing space before operator" +msgstr "" + +#: norminette/norm_error.py:9 +msgid "missing space after operator" +msgstr "" + +#: norminette/norm_error.py:10 +msgid "extra space before operator" +msgstr "" + +#: norminette/norm_error.py:11 +msgid "extra space after operator" +msgstr "" + +#: norminette/norm_error.py:12 +msgid "Missing space after parenthesis (brace/bracket)" +msgstr "" + +#: norminette/norm_error.py:13 +msgid "Missing space before parenthesis (brace/bracket)" +msgstr "" + +#: norminette/norm_error.py:14 +msgid "Extra space after parenthesis (brace/bracket)" +msgstr "" + +#: norminette/norm_error.py:15 +msgid "Extra space before parenthesis (brace/bracket)" +msgstr "" + +#: norminette/norm_error.py:16 +msgid "space after pointer" +msgstr "" + +#: norminette/norm_error.py:17 +msgid "Unexpected space/tab at line start" +msgstr "" + +#: norminette/norm_error.py:18 +msgid "bad spacing before pointer" +msgstr "" + +#: norminette/norm_error.py:19 +msgid "Found space when expecting tab before function name" +msgstr "" + +#: 
norminette/norm_error.py:20 +msgid "extra tabs before function name" +msgstr "" + +#: norminette/norm_error.py:21 +msgid "extra tabs before typedef name" +msgstr "" + +#: norminette/norm_error.py:22 +msgid "missing tab before function name" +msgstr "" + +#: norminette/norm_error.py:23 +msgid "missing tab before variable name" +msgstr "" + +#: norminette/norm_error.py:24 +msgid "Missing tab before typedef name" +msgstr "" + +#: norminette/norm_error.py:25 +msgid "extra tab before variable name" +msgstr "" + +#: norminette/norm_error.py:26 +msgid "line too long" +msgstr "" + +#: norminette/norm_error.py:27 +msgid "Expected parenthesis" +msgstr "" + +#: norminette/norm_error.py:28 +msgid "missing type qualifier or identifier in function arguments" +msgstr "" + +#: norminette/norm_error.py:29 +msgid "" +"user defined identifiers should contain only lowercase characters, digits or " +"'_'" +msgstr "" + +#: norminette/norm_error.py:31 +msgid "Missing tabs for indent level" +msgstr "" + +#: norminette/norm_error.py:32 +msgid "Extra tabs for indent level" +msgstr "" + +#: norminette/norm_error.py:33 +msgid "Extra whitespaces for indent level" +msgstr "" + +#: norminette/norm_error.py:34 +msgid "Found space when expecting tab" +msgstr "" + +#: norminette/norm_error.py:36 +msgid "Function has more than 25 lines" +msgstr "" + +#: norminette/norm_error.py:37 +msgid "Space on empty line" +msgstr "" + +#: norminette/norm_error.py:38 +msgid "Space before newline" +msgstr "" + +#: norminette/norm_error.py:39 +msgid "Too many instructions on a single line" +msgstr "" + +#: norminette/norm_error.py:40 +msgid "Missing space after preprocessor directive" +msgstr "" + +#: norminette/norm_error.py:41 +msgid "Unrecognized preprocessor statement" +msgstr "" + +#: norminette/norm_error.py:42 +msgid "Preprocessor statement not at the beginning of the line" +msgstr "" + +#: norminette/norm_error.py:43 +msgid "Preprocessor statement must only contain constant defines" +msgstr "" + +#: norminette/norm_error.py:44 +msgid "Expected EOL after preprocessor statement" +msgstr "" + +#: norminette/norm_error.py:45 +msgid "If preprocessor statement without endif" +msgstr "" + +#: norminette/norm_error.py:46 +msgid "Elif preprocessor statement without if or elif" +msgstr "" + +#: norminette/norm_error.py:47 +msgid "Ifdef preprocessor statement without endif" +msgstr "" + +#: norminette/norm_error.py:48 +msgid "Ifndef preprocessor statement without endif" +msgstr "" + +#: norminette/norm_error.py:49 +msgid "Else preprocessor statement without if or elif" +msgstr "" + +#: norminette/norm_error.py:50 +msgid "Endif preprocessor statement without if, elif or else" +msgstr "" + +#: norminette/norm_error.py:51 +msgid "Bad preprocessor indentation" +msgstr "" + +#: norminette/norm_error.py:52 +msgid "Multiline preprocessor statement is forbidden" +msgstr "" + +#: norminette/norm_error.py:53 +msgid "Preprocessor statements are only allowed in the global scope" +msgstr "" + +#: norminette/norm_error.py:54 +msgid "User defined typedef must start with t_" +msgstr "" + +#: norminette/norm_error.py:55 +msgid "Structure name must start with s_" +msgstr "" + +#: norminette/norm_error.py:56 +msgid "Enum name must start with e_" +msgstr "" + +#: norminette/norm_error.py:57 +msgid "Union name must start with u_" +msgstr "" + +#: norminette/norm_error.py:58 +msgid "Global variable must start with g_" +msgstr "" + +#: norminette/norm_error.py:59 +msgid "Missing whitespace before typedef name" +msgstr "" + +#: norminette/norm_error.py:60 +msgid 
"Global variable present in file. Make sure it is a reasonable choice." +msgstr "" + +#: norminette/norm_error.py:61 +msgid "Logic operator at the end of line" +msgstr "" + +#: norminette/norm_error.py:62 +msgid "Empty line at start of file" +msgstr "" + +#: norminette/norm_error.py:63 +msgid "Empty line in function" +msgstr "" + +#: norminette/norm_error.py:64 +msgid "Empty line at end of file" +msgstr "" + +#: norminette/norm_error.py:65 +msgid "Variable declared in incorrect scope" +msgstr "" + +#: norminette/norm_error.py:66 +msgid "Missing type in variable declaration" +msgstr "" + +#: norminette/norm_error.py:67 +msgid "Variable declaration not at start of function" +msgstr "" + +#: norminette/norm_error.py:68 +msgid "Too many variables declarations in a function" +msgstr "" + +#: norminette/norm_error.py:69 +msgid "Too many functions in file" +msgstr "" + +#: norminette/norm_error.py:70 +msgid "Expected newline after brace" +msgstr "" + +#: norminette/norm_error.py:71 +msgid "Consecutive newlines" +msgstr "" + +#: norminette/norm_error.py:72 +msgid "Functions must be separated by a newline" +msgstr "" + +#: norminette/norm_error.py:73 +msgid "Variable declarations must be followed by a newline" +msgstr "" + +#: norminette/norm_error.py:74 +msgid "Preprocessor statement must be followed by a newline" +msgstr "" + +#: norminette/norm_error.py:75 +msgid "Multiple assignations on a single line" +msgstr "" + +#: norminette/norm_error.py:76 +msgid "Multiple declarations on a single line" +msgstr "" + +#: norminette/norm_error.py:77 +msgid "Declaration and assignation on a single line" +msgstr "" + +#: norminette/norm_error.py:78 +msgid "Forbidden control structure" +msgstr "" + +#: norminette/norm_error.py:79 +msgid "Missing space after keyword" +msgstr "" + +#: norminette/norm_error.py:80 +msgid "Return value must be in parenthesis" +msgstr "Return value must be in parenthesis" + +#: norminette/norm_error.py:81 +msgid "Expected semicolon" +msgstr "" + +#: norminette/norm_error.py:82 +msgid "Expected tab" +msgstr "" + +#: norminette/norm_error.py:83 +msgid "Empty function argument requires void" +msgstr "" + +#: norminette/norm_error.py:84 +msgid "Misaligned variable declaration" +msgstr "" + +#: norminette/norm_error.py:85 +msgid "Misaligned function declaration" +msgstr "" + +#: norminette/norm_error.py:86 +msgid "Comment is invalid in this scope" +msgstr "" + +#: norminette/norm_error.py:87 +msgid "Macro name must be capitalized" +msgstr "" + +#: norminette/norm_error.py:88 +msgid "Macro functions are forbidden" +msgstr "" + +#: norminette/norm_error.py:89 +msgid "Assignment in control structure" +msgstr "" + +#: norminette/norm_error.py:90 +msgid "Variable length array forbidden" +msgstr "" + +#: norminette/norm_error.py:91 +msgid "Function has more than 4 arguments" +msgstr "" + +#: norminette/norm_error.py:92 +msgid ".c file includes are forbidden" +msgstr "" + +#: norminette/norm_error.py:93 +msgid "Include must be at the start of file" +msgstr "" + +#: norminette/norm_error.py:94 +msgid "Header protection must include all the instructions" +msgstr "" + +#: norminette/norm_error.py:95 +msgid "Instructions after header protection are forbidden" +msgstr "" + +#: norminette/norm_error.py:96 +msgid "Wrong header protection name" +msgstr "" + +#: norminette/norm_error.py:97 +msgid "Header protection must be in uppercase" +msgstr "" + +#: norminette/norm_error.py:98 +msgid "Multiple header protections, only one is allowed" +msgstr "" + +#: norminette/norm_error.py:99 +msgid "Header 
protection not containing #define" +msgstr "" + +#: norminette/norm_error.py:100 +msgid "Ternaries are forbidden" +msgstr "" + +#: norminette/norm_error.py:101 +msgid "Too many values on define" +msgstr "" + +#: norminette/norm_error.py:102 +msgid "Newline in declaration" +msgstr "" + +#: norminette/norm_error.py:103 +msgid "Multiple instructions in single line control structure" +msgstr "" + +#: norminette/norm_error.py:104 +msgid "Newline in define" +msgstr "" + +#: norminette/norm_error.py:105 +msgid "Missing identifier in typedef declaration" +msgstr "" + +#: norminette/norm_error.py:106 +msgid "Label statements are forbidden" +msgstr "" + +#: norminette/norm_error.py:107 +msgid "Goto statements are forbidden" +msgstr "" + +#: norminette/norm_error.py:108 +msgid "Preprocessors can only be used in the global scope" +msgstr "" + +#: norminette/norm_error.py:109 +msgid "Function prototype in incorrect scope" +msgstr "" + +#: norminette/norm_error.py:110 +msgid "Statement is in incorrect scope" +msgstr "" + +#: norminette/norm_error.py:111 +msgid "Incorrect values in define" +msgstr "" + +#: norminette/norm_error.py:112 +msgid "Expected newline before brace" +msgstr "" + +#: norminette/norm_error.py:113 +msgid "Expected newline after control structure" +msgstr "" + +#: norminette/norm_error.py:114 +msgid "Unrecognized variable type" +msgstr "" + +#: norminette/norm_error.py:115 +msgid "Comment must be on its own line or at end of a line" +msgstr "" + +#: norminette/norm_error.py:116 +msgid "Comma at line start" +msgstr "" + +#: norminette/norm_error.py:117 +msgid "Mixed spaces and tabs" +msgstr "" + +#: norminette/norm_error.py:118 +msgid "Function attribute must be at the end of line" +msgstr "" + +#: norminette/norm_error.py:119 +msgid "Missing or invalid 42 header" +msgstr "" + +#: norminette/norm_error.py:120 +msgid "Missing space between include and filename" +msgstr "" + +#: norminette/norm_error.py:121 +msgid "Enums, structs and unions need to be defined only in global scope" +msgstr "" + +#: norminette/norm_error.py:122 +msgid "Typedef declaration are not allowed in .c files" +msgstr "" + +#: norminette/norm_error.py:123 +msgid "Struct declaration are not allowed in .c files" +msgstr "" + +#: norminette/norm_error.py:124 +msgid "Union declaration are not allowed in .c files" +msgstr "" + +#: norminette/norm_error.py:125 +msgid "Enum declaration are not allowed in .c files" +msgstr "" + +#: norminette/norm_error.py:126 +msgid "Unexpected end of file (EOF) while parsing a char" +msgstr "" + +#: norminette/norm_error.py:127 +msgid "Unexpected end of line (EOL) while parsing a char" +msgstr "" + +#: norminette/norm_error.py:128 +msgid "Unexpected end of file (EOF) while parsing a multiline comment" +msgstr "" + +#: norminette/norm_error.py:129 +msgid "Unexpected end of file (EOF) while parsing a string" +msgstr "" + +#: norminette/norm_error.py:130 +msgid "Empty character constant" +msgstr "" + +#: norminette/norm_error.py:131 +msgid "Character constants can have only one character" +msgstr "" + +#: norminette/norm_error.py:132 +msgid "This suffix is invalid" +msgstr "" + +#: norminette/norm_error.py:133 +msgid "Invalid suffix for float/double literal constant" +msgstr "" + +#: norminette/norm_error.py:134 +msgid "Invalid binary integer literal" +msgstr "" + +#: norminette/norm_error.py:135 +msgid "Invalid octal integer literal" +msgstr "" + +#: norminette/norm_error.py:136 +msgid "Invalid hexadecimal integer literal" +msgstr "" + +#: norminette/norm_error.py:137 +msgid "Potential 
maximal munch detected" +msgstr "" + +#: norminette/norm_error.py:138 +msgid "No hexadecimal digits followed by the \\x" +msgstr "" + +#: norminette/norm_error.py:139 +msgid "Unknown escape sequence" +msgstr "" + +#: norminette/norm_error.py:140 +msgid "Exponent has no digits" +msgstr "" + +#: norminette/norm_error.py:141 +msgid "Multiple dots in float constant" +msgstr "" + +#: norminette/norm_error.py:142 +msgid "Multiple 'x' in hexadecimal float constant" +msgstr "" + +#~ msgid "Hello, World!" +#~ msgstr "Hello, World!" diff --git a/norminette/locale/pt_BR/LC_MESSAGES/norminette.po b/norminette/locale/pt_BR/LC_MESSAGES/norminette.po new file mode 100644 index 00000000..af0cc0a4 --- /dev/null +++ b/norminette/locale/pt_BR/LC_MESSAGES/norminette.po @@ -0,0 +1,576 @@ +# Portuguese translations for PACKAGE package. +# Copyright (C) 2025 THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# Automatically generated, 2025. +# +msgid "" +msgstr "" +"Project-Id-Version: 3.3.59\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-04-11 10:12-0300\n" +"PO-Revision-Date: 2025-04-09 17:51-0300\n" +"Last-Translator: Automatically generated\n" +"Language-Team: none\n" +"Language: pt_BR\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +#: norminette/norm_error.py:4 +msgid "Spaces at beginning of line" +msgstr "Espaços no começo da linha" + +#: norminette/norm_error.py:5 norminette/norm_error.py:35 +msgid "Found tab when expecting space" +msgstr "Encontrado tabulação ao invés espaço" + +#: norminette/norm_error.py:6 +msgid "Two or more consecutives spaces" +msgstr "Dois ou mais espaços consecutivos" + +#: norminette/norm_error.py:7 +msgid "Two or more consecutives white spaces" +msgstr "Dois ou mais espaços em branco consecutivos" + +#: norminette/norm_error.py:8 +msgid "missing space before operator" +msgstr "Faltando espaço antes do operador" + +#: norminette/norm_error.py:9 +msgid "missing space after operator" +msgstr "Faltando espaço após o operador" + +#: norminette/norm_error.py:10 +msgid "extra space before operator" +msgstr "Espaço a mais antes do operador" + +#: norminette/norm_error.py:11 +msgid "extra space after operator" +msgstr "Espaço a mais depois do operador" + +#: norminette/norm_error.py:12 +msgid "Missing space after parenthesis (brace/bracket)" +msgstr "Faltando espaço após parêntese (chave/colchete)" + +#: norminette/norm_error.py:13 +msgid "Missing space before parenthesis (brace/bracket)" +msgstr "Faltando espaço antes de parêntese (chave/colchete)" + +#: norminette/norm_error.py:14 +msgid "Extra space after parenthesis (brace/bracket)" +msgstr "Espaço extra após parêntese (chave/colchete)" + +#: norminette/norm_error.py:15 +msgid "Extra space before parenthesis (brace/bracket)" +msgstr "Espaço extra antes de parêntese (chave/colchete)" + +#: norminette/norm_error.py:16 +msgid "space after pointer" +msgstr "Espaço após ponteiro" + +#: norminette/norm_error.py:17 +msgid "Unexpected space/tab at line start" +msgstr "Espaço/tabulação inesperado no início da linha" + +#: norminette/norm_error.py:18 +msgid "bad spacing before pointer" +msgstr "Espaçamento incorreto antes do ponteiro" + +#: norminette/norm_error.py:19 +msgid "Found space when expecting tab before function name" +msgstr "Espaço encontrado onde era esperada tabulação antes do nome da função" + +#: norminette/norm_error.py:20 +msgid "extra tabs before 
function name" +msgstr "Tabulações extras antes do nome da função" + +#: norminette/norm_error.py:21 +msgid "extra tabs before typedef name" +msgstr "Tabulações extras antes do nome do typedef" + +#: norminette/norm_error.py:22 +msgid "missing tab before function name" +msgstr "Faltando tabulação antes do nome da função" + +#: norminette/norm_error.py:23 +msgid "missing tab before variable name" +msgstr "Faltando tabulação antes do nome da variável" + +#: norminette/norm_error.py:24 +msgid "Missing tab before typedef name" +msgstr "Faltando tabulação antes do nome do typedef" + +#: norminette/norm_error.py:25 +msgid "extra tab before variable name" +msgstr "Tabulação extra antes do nome da variável" + +#: norminette/norm_error.py:26 +msgid "line too long" +msgstr "Linha muito longa" + +#: norminette/norm_error.py:27 +msgid "Expected parenthesis" +msgstr "Parêntese esperado" + +#: norminette/norm_error.py:28 +msgid "missing type qualifier or identifier in function arguments" +msgstr "" +"Faltando qualificador de tipo ou identificador nos argumentos da função" + +#: norminette/norm_error.py:29 +msgid "" +"user defined identifiers should contain only lowercase characters, digits or " +"'_'" +msgstr "" +"Identificadores definidos pelo usuário devem conter apenas letras " +"minúsculas, dígitos ou '_'" + +#: norminette/norm_error.py:31 +msgid "Missing tabs for indent level" +msgstr "Faltando tabulações para o nível de indentação" + +#: norminette/norm_error.py:32 +msgid "Extra tabs for indent level" +msgstr "Tabulações extras para o nível de indentação" + +#: norminette/norm_error.py:33 +msgid "Extra whitespaces for indent level" +msgstr "Espaços extras para o nível de indentação" + +#: norminette/norm_error.py:34 +msgid "Found space when expecting tab" +msgstr "Espaço encontrado onde era esperado tabulação" + +#: norminette/norm_error.py:36 +msgid "Function has more than 25 lines" +msgstr "A função tem mais de 25 linhas" + +#: norminette/norm_error.py:37 +msgid "Space on empty line" +msgstr "Espaço em linha vazia" + +#: norminette/norm_error.py:38 +msgid "Space before newline" +msgstr "Espaço antes da nova linha" + +#: norminette/norm_error.py:39 +msgid "Too many instructions on a single line" +msgstr "Muitas instruções em uma única linha" + +#: norminette/norm_error.py:40 +msgid "Missing space after preprocessor directive" +msgstr "Faltando espaço após a diretiva do pré-processador" + +#: norminette/norm_error.py:41 +msgid "Unrecognized preprocessor statement" +msgstr "Declaração de pré-processador não reconhecida" + +#: norminette/norm_error.py:42 +msgid "Preprocessor statement not at the beginning of the line" +msgstr "Declaração de pré-processador não está no início da linha" + +#: norminette/norm_error.py:43 +msgid "Preprocessor statement must only contain constant defines" +msgstr "Declaração de pré-processador deve conter apenas constantes definidas" + +#: norminette/norm_error.py:44 +msgid "Expected EOL after preprocessor statement" +msgstr "Esperado fim de linha após a declaração do pré-processador" + +#: norminette/norm_error.py:45 +msgid "If preprocessor statement without endif" +msgstr "Declaração de pré-processador 'if' sem 'endif'" + +#: norminette/norm_error.py:46 +msgid "Elif preprocessor statement without if or elif" +msgstr "Declaração de pré-processador 'elif' sem 'if' ou 'elif'" + +#: norminette/norm_error.py:47 +msgid "Ifdef preprocessor statement without endif" +msgstr "Declaração de pré-processador 'ifdef' sem 'endif'" + +#: norminette/norm_error.py:48 +msgid "Ifndef 
preprocessor statement without endif" +msgstr "Declaração de pré-processador 'ifndef' sem 'endif'" + +#: norminette/norm_error.py:49 +msgid "Else preprocessor statement without if or elif" +msgstr "Declaração de pré-processador 'else' sem 'if' ou 'elif'" + +#: norminette/norm_error.py:50 +msgid "Endif preprocessor statement without if, elif or else" +msgstr "Declaração de pré-processador 'endif' sem 'if', 'elif' ou 'else'" + +#: norminette/norm_error.py:51 +msgid "Bad preprocessor indentation" +msgstr "Indentação incorreta no pré-processador" + +#: norminette/norm_error.py:52 +msgid "Multiline preprocessor statement is forbidden" +msgstr "Declaração de pré-processador em múltiplas linhas é proibida" + +#: norminette/norm_error.py:53 +msgid "Preprocessor statements are only allowed in the global scope" +msgstr "Declarações de pré-processador são permitidas apenas no escopo global" + +#: norminette/norm_error.py:54 +msgid "User defined typedef must start with t_" +msgstr "Typedef definido pelo usuário deve começar com t_" + +#: norminette/norm_error.py:55 +msgid "Structure name must start with s_" +msgstr "Nome da estrutura deve começar com s_" + +#: norminette/norm_error.py:56 +msgid "Enum name must start with e_" +msgstr "Nome do enum deve começar com e_" + +#: norminette/norm_error.py:57 +msgid "Union name must start with u_" +msgstr "Nome da união deve começar com u_" + +#: norminette/norm_error.py:58 +msgid "Global variable must start with g_" +msgstr "Variável global deve começar com g_" + +#: norminette/norm_error.py:59 +msgid "Missing whitespace before typedef name" +msgstr "Espaço em branco faltando antes do nome do typedef" + +#: norminette/norm_error.py:60 +msgid "Global variable present in file. Make sure it is a reasonable choice." +msgstr "" +"Variável global presente no arquivo. Certifique-se de que é uma escolha " +"razoável." 
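The entries above (and the matching en_US catalog earlier in this diff) are ordinary gettext catalogs: the i18n helper compiles each norminette.po into a norminette.mo beside it with msgfmt, and norm_error.py then resolves its messages through the `_` function it imports from norminette.i18n. A minimal stand-alone sketch of that lookup, assuming only what the paths in this diff show (domain "norminette", catalogs under norminette/locale) and using the standard library directly rather than the project's own wrapper:

    import gettext

    # Locale layout as shown in this diff: norminette/locale/<lang>/LC_MESSAGES/norminette.mo
    translation = gettext.translation(
        "norminette",                  # domain, i.e. the norminette.mo file name
        localedir="norminette/locale",
        languages=["pt_BR"],           # example choice; en_US works the same way
        fallback=True,                 # don't raise if no compiled .mo exists yet
    )
    _ = translation.gettext

    print(_("Spaces at beginning of line"))
    # prints "Espaços no começo da linha" once the pt_BR catalog is compiled,
    # and the original English msgid otherwise

An empty msgstr, as in most en_US entries, behaves the same way: gettext treats the entry as untranslated and simply returns the msgid.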
+ +#: norminette/norm_error.py:61 +msgid "Logic operator at the end of line" +msgstr "Operador lógico no final da linha" + +#: norminette/norm_error.py:62 +msgid "Empty line at start of file" +msgstr "Linha vazia no início do arquivo" + +#: norminette/norm_error.py:63 +msgid "Empty line in function" +msgstr "Linha vazia dentro da função" + +#: norminette/norm_error.py:64 +msgid "Empty line at end of file" +msgstr "Linha vazia no final do arquivo" + +#: norminette/norm_error.py:65 +msgid "Variable declared in incorrect scope" +msgstr "Variável declarada no escopo incorreto" + +#: norminette/norm_error.py:66 +msgid "Missing type in variable declaration" +msgstr "Faltando tipo na declaração da variável" + +#: norminette/norm_error.py:67 +msgid "Variable declaration not at start of function" +msgstr "Declaração de variável não está no início da função" + +#: norminette/norm_error.py:68 +msgid "Too many variables declarations in a function" +msgstr "Muitas declarações de variáveis em uma função" + +#: norminette/norm_error.py:69 +msgid "Too many functions in file" +msgstr "Muitas funções no arquivo" + +#: norminette/norm_error.py:70 +msgid "Expected newline after brace" +msgstr "Esperada nova linha após a chave" + +#: norminette/norm_error.py:71 +msgid "Consecutive newlines" +msgstr "Novas linhas consecutivas" + +#: norminette/norm_error.py:72 +msgid "Functions must be separated by a newline" +msgstr "Funções devem ser separadas por uma linha em branco" + +#: norminette/norm_error.py:73 +msgid "Variable declarations must be followed by a newline" +msgstr "Declarações de variáveis devem ser seguidas por uma nova linha" + +#: norminette/norm_error.py:74 +msgid "Preprocessor statement must be followed by a newline" +msgstr "Declaração de pré-processador deve ser seguida por uma nova linha" + +#: norminette/norm_error.py:75 +msgid "Multiple assignations on a single line" +msgstr "Múltiplas atribuições em uma única linha" + +#: norminette/norm_error.py:76 +msgid "Multiple declarations on a single line" +msgstr "Múltiplas declarações em uma única linha" + +#: norminette/norm_error.py:77 +msgid "Declaration and assignation on a single line" +msgstr "Declaração e atribuição na mesma linha" + +#: norminette/norm_error.py:78 +msgid "Forbidden control structure" +msgstr "Estrutura de controle proibida" + +#: norminette/norm_error.py:79 +msgid "Missing space after keyword" +msgstr "Faltando espaço após a palavra-chave" + +#: norminette/norm_error.py:80 +msgid "Return value must be in parenthesis" +msgstr "" + +#: norminette/norm_error.py:81 +msgid "Expected semicolon" +msgstr "Esperado ponto e vírgula" + +#: norminette/norm_error.py:82 +msgid "Expected tab" +msgstr "Tabulação esperada" + +#: norminette/norm_error.py:83 +msgid "Empty function argument requires void" +msgstr "Argumento de função vazio requer void" + +#: norminette/norm_error.py:84 +msgid "Misaligned variable declaration" +msgstr "Declaração de variável desalinhada" + +#: norminette/norm_error.py:85 +msgid "Misaligned function declaration" +msgstr "Declaração de função desalinhada" + +#: norminette/norm_error.py:86 +msgid "Comment is invalid in this scope" +msgstr "Comentário inválido neste escopo" + +#: norminette/norm_error.py:87 +msgid "Macro name must be capitalized" +msgstr "Nome de macro deve estar em letras maiúsculas" + +#: norminette/norm_error.py:88 +msgid "Macro functions are forbidden" +msgstr "Funções de macro são proibidas" + +#: norminette/norm_error.py:89 +msgid "Assignment in control structure" +msgstr "Atribuição dentro de 
estrutura de controle" + +#: norminette/norm_error.py:90 +msgid "Variable length array forbidden" +msgstr "Arrays de tamanho variável são proibidos" + +#: norminette/norm_error.py:91 +msgid "Function has more than 4 arguments" +msgstr "A função possui mais de 4 argumentos" + +#: norminette/norm_error.py:92 +msgid ".c file includes are forbidden" +msgstr "Inclusões de arquivos .c são proibidas" + +#: norminette/norm_error.py:93 +msgid "Include must be at the start of file" +msgstr "Instruções de include devem estar no início do arquivo" + +#: norminette/norm_error.py:94 +msgid "Header protection must include all the instructions" +msgstr "A proteção de cabeçalho deve englobar todas as instruções" + +#: norminette/norm_error.py:95 +msgid "Instructions after header protection are forbidden" +msgstr "Instruções após a proteção de cabeçalho são proibidas" + +#: norminette/norm_error.py:96 +msgid "Wrong header protection name" +msgstr "Nome da proteção de cabeçalho incorreto" + +#: norminette/norm_error.py:97 +msgid "Header protection must be in uppercase" +msgstr "A proteção de cabeçalho deve estar em letras maiúsculas" + +#: norminette/norm_error.py:98 +msgid "Multiple header protections, only one is allowed" +msgstr "Múltiplas proteções de cabeçalho — apenas uma é permitida" + +#: norminette/norm_error.py:99 +msgid "Header protection not containing #define" +msgstr "Proteção de cabeçalho não contém #define" + +#: norminette/norm_error.py:100 +msgid "Ternaries are forbidden" +msgstr "Operadores ternários são proibidos" + +#: norminette/norm_error.py:101 +msgid "Too many values on define" +msgstr "Muitos valores em uma diretiva define" + +#: norminette/norm_error.py:102 +msgid "Newline in declaration" +msgstr "Nova linha na declaração" + +#: norminette/norm_error.py:103 +msgid "Multiple instructions in single line control structure" +msgstr "Múltiplas instruções em uma única linha de estrutura de controle" + +#: norminette/norm_error.py:104 +msgid "Newline in define" +msgstr "Nova linha em uma diretiva define" + +#: norminette/norm_error.py:105 +msgid "Missing identifier in typedef declaration" +msgstr "Identificador ausente na declaração de typedef" + +#: norminette/norm_error.py:106 +msgid "Label statements are forbidden" +msgstr "Declarações de rótulo (label) são proibidas" + +#: norminette/norm_error.py:107 +msgid "Goto statements are forbidden" +msgstr "Declarações goto são proibidas" + +#: norminette/norm_error.py:108 +msgid "Preprocessors can only be used in the global scope" +msgstr "Pré-processadores só podem ser usados no escopo global" + +#: norminette/norm_error.py:109 +msgid "Function prototype in incorrect scope" +msgstr "Protótipo de função em escopo incorreto" + +#: norminette/norm_error.py:110 +msgid "Statement is in incorrect scope" +msgstr "Instrução está em escopo incorreto" + +#: norminette/norm_error.py:111 +msgid "Incorrect values in define" +msgstr "Valores incorretos em uma diretiva define" + +#: norminette/norm_error.py:112 +msgid "Expected newline before brace" +msgstr "Esperada nova linha antes da chave" + +#: norminette/norm_error.py:113 +msgid "Expected newline after control structure" +msgstr "Esperada nova linha após a estrutura de controle" + +#: norminette/norm_error.py:114 +msgid "Unrecognized variable type" +msgstr "Tipo de variável não reconhecido" + +#: norminette/norm_error.py:115 +msgid "Comment must be on its own line or at end of a line" +msgstr "Comentário deve estar em sua própria linha ou ao fim de uma linha" + +#: norminette/norm_error.py:116 +msgid 
"Comma at line start" +msgstr "Vírgula no início da linha" + +#: norminette/norm_error.py:117 +msgid "Mixed spaces and tabs" +msgstr "Mistura de espaços e tabulações" + +#: norminette/norm_error.py:118 +msgid "Function attribute must be at the end of line" +msgstr "Atributo da função deve estar no final da linha" + +#: norminette/norm_error.py:119 +msgid "Missing or invalid 42 header" +msgstr "Cabeçalho 42 ausente ou inválido" + +#: norminette/norm_error.py:120 +msgid "Missing space between include and filename" +msgstr "Faltando espaço entre include e nome do arquivo" + +#: norminette/norm_error.py:121 +msgid "Enums, structs and unions need to be defined only in global scope" +msgstr "Enums, structs e unions devem ser definidos apenas no escopo global" + +#: norminette/norm_error.py:122 +msgid "Typedef declaration are not allowed in .c files" +msgstr "Declarações typedef não são permitidas em arquivos .c" + +#: norminette/norm_error.py:123 +msgid "Struct declaration are not allowed in .c files" +msgstr "Declarações struct não são permitidas em arquivos .c" + +#: norminette/norm_error.py:124 +msgid "Union declaration are not allowed in .c files" +msgstr "Declarações union não são permitidas em arquivos .c" + +#: norminette/norm_error.py:125 +msgid "Enum declaration are not allowed in .c files" +msgstr "Declarações enum não são permitidas em arquivos .c" + +#: norminette/norm_error.py:126 +msgid "Unexpected end of file (EOF) while parsing a char" +msgstr "Fim de arquivo inesperado (EOF) ao analisar um caractere" + +#: norminette/norm_error.py:127 +msgid "Unexpected end of line (EOL) while parsing a char" +msgstr "Fim de linha inesperado (EOL) ao analisar um caractere" + +#: norminette/norm_error.py:128 +msgid "Unexpected end of file (EOF) while parsing a multiline comment" +msgstr "Fim de arquivo inesperado (EOF) ao analisar um comentário multilinha" + +#: norminette/norm_error.py:129 +msgid "Unexpected end of file (EOF) while parsing a string" +msgstr "Fim de arquivo inesperado (EOF) ao analisar uma string" + +#: norminette/norm_error.py:130 +msgid "Empty character constant" +msgstr "Constante de caractere vazia" + +#: norminette/norm_error.py:131 +msgid "Character constants can have only one character" +msgstr "Constantes de caractere devem conter apenas um caractere" + +#: norminette/norm_error.py:132 +msgid "This suffix is invalid" +msgstr "Esse sufixo é inválido" + +#: norminette/norm_error.py:133 +msgid "Invalid suffix for float/double literal constant" +msgstr "Sufixo inválido para constante literal float/double" + +#: norminette/norm_error.py:134 +msgid "Invalid binary integer literal" +msgstr "Literal inteiro binário inválido" + +#: norminette/norm_error.py:135 +msgid "Invalid octal integer literal" +msgstr "Literal inteiro octal inválido" + +#: norminette/norm_error.py:136 +msgid "Invalid hexadecimal integer literal" +msgstr "Literal inteiro hexadecimal inválido" + +#: norminette/norm_error.py:137 +msgid "Potential maximal munch detected" +msgstr "Possível ocorrência de 'maximal munch' detectada" + +#: norminette/norm_error.py:138 +msgid "No hexadecimal digits followed by the \\x" +msgstr "Nenhum dígito hexadecimal após o \\x" + +#: norminette/norm_error.py:139 +msgid "Unknown escape sequence" +msgstr "Sequência de escape desconhecida" + +#: norminette/norm_error.py:140 +msgid "Exponent has no digits" +msgstr "Expoente sem dígitos" + +#: norminette/norm_error.py:141 +msgid "Multiple dots in float constant" +msgstr "Múltiplos pontos em constante float" + +#: 
norminette/norm_error.py:142 +msgid "Multiple 'x' in hexadecimal float constant" +msgstr "Múltiplos 'x' em constante float hexadecimal" + +#~ msgid "space before function name" +#~ msgstr "Espaço antes do nome da função" diff --git a/norminette/norm_error.py b/norminette/norm_error.py index 6abd1a52..07b75ab4 100644 --- a/norminette/norm_error.py +++ b/norminette/norm_error.py @@ -1,105 +1,143 @@ +from norminette.i18n import _ + errors = { - "SPC_INSTEAD_TAB": "Spaces at beginning of line", - "TAB_INSTEAD_SPC": "Found tab when expecting space", - "CONSECUTIVE_SPC": "Two or more consecutives spaces", - "SPC_BFR_OPERATOR": "missing space before operator", - "SPC_AFTER_OPERATOR": "missing space after operator", - "NO_SPC_BFR_OPR": "extra space before operator", - "NO_SPC_AFR_OPR": "extra space after operator", - "SPC_AFTER_PAR": "Missing space after parenthesis (brace/bracket)", - "SPC_BFR_PAR": "Missing space before parenthesis (brace/bracket)", - "NO_SPC_AFR_PAR": "Extra space after parenthesis (brace/bracket)", - "NO_SPC_BFR_PAR": "Extra space before parenthesis (brace/bracket)", - "SPC_AFTER_POINTER": "space after pointer", - "SPC_BFR_POINTER": "bad spacing before pointer", - "SPACE_BEFORE_FUNC": "space before function name", - "TOO_MANY_TABS_FUNC": "extra tabs before function name", - "MISSING_TAB_FUNC": "missing tab before function name", - "MISSING_TAB_VAR": "missing tab before variable name", - "TOO_MANY_TAB_VAR": "extra tab before variable name", - "LINE_TOO_LONG": "line too long", - "EXP_PARENTHESIS": "Expected parenthesis", - "MISSING_IDENTIFIER": "missing type qualifier or identifier in function arguments", - "FORBIDDEN_CHAR_NAME": "user defined identifiers should contain only lowercase characters, \ -digits or '_'", - "TOO_FEW_TAB": "Missing tabs for indent level", - "TOO_MANY_TAB": "Extra tabs for indent level", - "SPACE_REPLACE_TAB": "Found space when expecting tab", - "TAB_REPLACE_SPACE": "Found tab when expecting space", - "TOO_MANY_LINES": "Function has more than 25 lines", - "SPACE_EMPTY_LINE": "Space on empty line", - "SPC_BEFORE_NL": "Space before newline", - "TOO_MANY_INSTR": "Too many instructions on a single line", - "PREPROC_UKN_STATEMENT": "Unrecognized preprocessor statement", - "PREPROC_START_LINE": "Preprocessor statement not at the beginning of the line", - "PREPROC_CONSTANT": "Preprocessor statement must only contain constant defines", - "PREPROC_EXPECTED_EOL": "Expected EOL after preprocessor statement", - "PREPROC_BAD_INDENT": "Bad preprocessor indentation", - "USER_DEFINED_TYPEDEF": "User defined typedef must start with t_", - "STRUCT_TYPE_NAMING": "Structure name must start with s_", - "ENUM_TYPE_NAMING": "Enum name must start with e_", - "UNION_TYPE_NAMING": "Union name must start with u_", - "GLOBAL_VAR_NAMING": "Global variable must start with g_", - "EOL_OPERATOR": "Logic operator at the end of line", - "EMPTY_LINE_FUNCTION": "Empty line in function", - "EMPTY_LINE_FILE_START": "Empty line at start of file", - "EMPTY_LINE_FUNCTION": "Empty line in function", - "EMPTY_LINE_EOF": "Empty line at end of file", - "WRONG_SCOPE_VAR": "Variable declared in incorrect scope", - "VAR_DECL_START_FUNC": "Variable declaration not at start of function", - "TOO_MANY_FUNCS": "Too many functions in file", - "BRACE_SHOULD_EOL": "Expected newline after brace", - "CONSECUTIVE_NEWLINES": "Consecutive newlines", - "NEWLINE_PRECEDES_FUNC": "Functions must be separated by a newline", - "NL_AFTER_VAR_DECL": "Variable declarations must be followed by a newline", - 
"MULT_ASSIGN_LINE": "Multiple assignations on a single line", - "MULT_DECL_LINE": "Multiple declarations on a single line", - "DECL_ASSIGN_LINE": "Declaration and assignation on a single line", - "FORBIDDEN_CS": "Forbidden control structure", - "SPACE_AFTER_KW": "Missing space after keyword", - "RETURN_PARENTHESIS": "Return value must be in parenthesis", - "EXP_SEMI_COLON": "Expected semicolon", - "EXP_TAB": "Expected tab", - "NO_ARGS_VOID": "Empty function argument requires void", - "MISALIGNED_VAR_DECL": "Misaligned variable declaration", - "MISALIGNED_FUNC_DECL": "Misaligned function declaration", - "WRONG_SCOPE_COMMENT": "Comment is invalid in this scope", - "MACRO_NAME_CAPITAL": "Macro name must be capitalized", - "ASSIGN_IN_CONTROL": "Assignment in control structure", - "VLA_FORBIDDEN": "Variable length array forbidden", - "TOO_MANY_ARGS": "Function has more than 4 arguments", - "INCLUDE_HEADER_ONLY": ".c file includes are forbidden", - "INCLUDE_START_FILE": "Include must be at the start of file", - "HEADER_PROT_ALL": "Header protection must include all the instructions", - "HEADER_PROT_NAME": "Wrong header protection name", - "TERNARY_FBIDDEN": "Ternaries are forbidden", - "TOO_MANY_VALS": "Too many values on define", - "NEWLINE_IN_DECL": "Newline in declaration", - "MULT_IN_SINGLE_INSTR": "Multiple instructions in single line control structure", - "NEWLINE_DEFINE": "Newline in define", - "MISSING_TYPEDEF_ID": "Missing identifier in typedef declaration", - "LABEL_FBIDDEN": "Label statements are forbidden", - "PREPROC_GLOBAL": "Preprocessors can only be used in the global scope", - "WRONG_SCOPE_FCT": "Function prototype in incorrect scope", - "WRONG_SCOPE": "Statement is in incorrect scope", - "INCORRECT_DEFINE": "Incorrect values in define", - "BRACE_NEWLINE": "Expected newline before brace", - "EXP_NEWLINE": "Expected newline after control structure", - "ARG_TYPE_UKN": "Unrecognized variable type", - "COMMENT_ON_INSTR": "Comment must be on its own line", - "COMMA_START_LINE": "Comma at line start" + "SPC_INSTEAD_TAB": _("Spaces at beginning of line"), + "TAB_INSTEAD_SPC": _("Found tab when expecting space"), + "CONSECUTIVE_SPC": _("Two or more consecutives spaces"), + "CONSECUTIVE_WS": _("Two or more consecutives white spaces"), + "SPC_BFR_OPERATOR": _("missing space before operator"), + "SPC_AFTER_OPERATOR": _("missing space after operator"), + "NO_SPC_BFR_OPR": _("extra space before operator"), + "NO_SPC_AFR_OPR": _("extra space after operator"), + "SPC_AFTER_PAR": _("Missing space after parenthesis (brace/bracket)"), + "SPC_BFR_PAR": _("Missing space before parenthesis (brace/bracket)"), + "NO_SPC_AFR_PAR": _("Extra space after parenthesis (brace/bracket)"), + "NO_SPC_BFR_PAR": _("Extra space before parenthesis (brace/bracket)"), + "SPC_AFTER_POINTER": _("space after pointer"), + "SPC_LINE_START": _("Unexpected space/tab at line start"), + "SPC_BFR_POINTER": _("bad spacing before pointer"), + "SPACE_BEFORE_FUNC": _("Found space when expecting tab before function name"), + "TOO_MANY_TABS_FUNC": _("extra tabs before function name"), + "TOO_MANY_TABS_TD": _("extra tabs before typedef name"), + "MISSING_TAB_FUNC": _("missing tab before function name"), + "MISSING_TAB_VAR": _("missing tab before variable name"), + "MISSING_TAB_TYPDEF": _("Missing tab before typedef name"), + "TOO_MANY_TAB_VAR": _("extra tab before variable name"), + "LINE_TOO_LONG": _("line too long"), + "EXP_PARENTHESIS": _("Expected parenthesis"), + "MISSING_IDENTIFIER": _("missing type qualifier or identifier in 
function arguments"), + "FORBIDDEN_CHAR_NAME": _("user defined identifiers should contain only lowercase characters, \ +digits or '_'"), + "TOO_FEW_TAB": _("Missing tabs for indent level"), + "TOO_MANY_TAB": _("Extra tabs for indent level"), + "TOO_MANY_WS": _("Extra whitespaces for indent level"), + "SPACE_REPLACE_TAB": _("Found space when expecting tab"), + "TAB_REPLACE_SPACE": _("Found tab when expecting space"), + "TOO_MANY_LINES": _("Function has more than 25 lines"), + "SPACE_EMPTY_LINE": _("Space on empty line"), + "SPC_BEFORE_NL": _("Space before newline"), + "TOO_MANY_INSTR": _("Too many instructions on a single line"), + "PREPROC_NO_SPACE": _("Missing space after preprocessor directive"), + "PREPROC_UKN_STATEMENT": _("Unrecognized preprocessor statement"), + "PREPROC_START_LINE": _("Preprocessor statement not at the beginning of the line"), + "PREPROC_CONSTANT": _("Preprocessor statement must only contain constant defines"), + "PREPROC_EXPECTED_EOL": _("Expected EOL after preprocessor statement"), + "PREPROC_BAD_IF": _("If preprocessor statement without endif"), + "PREPROC_BAD_ELIF": _("Elif preprocessor statement without if or elif"), + "PREPROC_BAD_IFDEF": _("Ifdef preprocessor statement without endif"), + "PREPROC_BAD_IFNDEF": _("Ifndef preprocessor statement without endif"), + "PREPROC_BAD_ELSE": _("Else preprocessor statement without if or elif"), + "PREPROC_BAD_ENDIF": _("Endif preprocessor statement without if, elif or else"), + "PREPROC_BAD_INDENT": _("Bad preprocessor indentation"), + "PREPROC_MULTLINE": _("Multiline preprocessor statement is forbidden"), + "PREPOC_ONLY_GLOBAL": _("Preprocessor statements are only allowed in the global scope"), + "USER_DEFINED_TYPEDEF": _("User defined typedef must start with t_"), + "STRUCT_TYPE_NAMING": _("Structure name must start with s_"), + "ENUM_TYPE_NAMING": _("Enum name must start with e_"), + "UNION_TYPE_NAMING": _("Union name must start with u_"), + "GLOBAL_VAR_NAMING": _("Global variable must start with g_"), + "NO_TAB_BF_TYPEDEF": _("Missing whitespace before typedef name"), + "GLOBAL_VAR_DETECTED": _("Global variable present in file. 
Make sure it is a reasonable choice."), + "EOL_OPERATOR": _("Logic operator at the end of line"), + "EMPTY_LINE_FILE_START": _("Empty line at start of file"), + "EMPTY_LINE_FUNCTION": _("Empty line in function"), + "EMPTY_LINE_EOF": _("Empty line at end of file"), + "WRONG_SCOPE_VAR": _("Variable declared in incorrect scope"), + "IMPLICIT_VAR_TYPE": _("Missing type in variable declaration"), + "VAR_DECL_START_FUNC": _("Variable declaration not at start of function"), + "TOO_MANY_VARS_FUNC": _("Too many variables declarations in a function"), + "TOO_MANY_FUNCS": _("Too many functions in file"), + "BRACE_SHOULD_EOL": _("Expected newline after brace"), + "CONSECUTIVE_NEWLINES": _("Consecutive newlines"), + "NEWLINE_PRECEDES_FUNC": _("Functions must be separated by a newline"), + "NL_AFTER_VAR_DECL": _("Variable declarations must be followed by a newline"), + "NL_AFTER_PREPROC": _("Preprocessor statement must be followed by a newline"), + "MULT_ASSIGN_LINE": _("Multiple assignations on a single line"), + "MULT_DECL_LINE": _("Multiple declarations on a single line"), + "DECL_ASSIGN_LINE": _("Declaration and assignation on a single line"), + "FORBIDDEN_CS": _("Forbidden control structure"), + "SPACE_AFTER_KW": _("Missing space after keyword"), + "RETURN_PARENTHESIS": _("Return value must be in parenthesis"), + "EXP_SEMI_COLON": _("Expected semicolon"), + "EXP_TAB": _("Expected tab"), + "NO_ARGS_VOID": _("Empty function argument requires void"), + "MISALIGNED_VAR_DECL": _("Misaligned variable declaration"), + "MISALIGNED_FUNC_DECL": _("Misaligned function declaration"), + "WRONG_SCOPE_COMMENT": _("Comment is invalid in this scope"), + "MACRO_NAME_CAPITAL": _("Macro name must be capitalized"), + "MACRO_FUNC_FORBIDDEN": _("Macro functions are forbidden"), + "ASSIGN_IN_CONTROL": _("Assignment in control structure"), + "VLA_FORBIDDEN": _("Variable length array forbidden"), + "TOO_MANY_ARGS": _("Function has more than 4 arguments"), + "INCLUDE_HEADER_ONLY": _(".c file includes are forbidden"), + "INCLUDE_START_FILE": _("Include must be at the start of file"), + "HEADER_PROT_ALL": _("Header protection must include all the instructions"), + "HEADER_PROT_ALL_AF": _("Instructions after header protection are forbidden"), + "HEADER_PROT_NAME": _("Wrong header protection name"), + "HEADER_PROT_UPPER": _("Header protection must be in uppercase"), + "HEADER_PROT_MULT": _("Multiple header protections, only one is allowed"), + "HEADER_PROT_NODEF": _("Header protection not containing #define"), + "TERNARY_FBIDDEN": _("Ternaries are forbidden"), + "TOO_MANY_VALS": _("Too many values on define"), + "NEWLINE_IN_DECL": _("Newline in declaration"), + "MULT_IN_SINGLE_INSTR": _("Multiple instructions in single line control structure"), + "NEWLINE_DEFINE": _("Newline in define"), + "MISSING_TYPEDEF_ID": _("Missing identifier in typedef declaration"), + "LABEL_FBIDDEN": _("Label statements are forbidden"), + "GOTO_FBIDDEN": _("Goto statements are forbidden"), + "PREPROC_GLOBAL": _("Preprocessors can only be used in the global scope"), + "WRONG_SCOPE_FCT": _("Function prototype in incorrect scope"), + "WRONG_SCOPE": _("Statement is in incorrect scope"), + "INCORRECT_DEFINE": _("Incorrect values in define"), + "BRACE_NEWLINE": _("Expected newline before brace"), + "EXP_NEWLINE": _("Expected newline after control structure"), + "ARG_TYPE_UKN": _("Unrecognized variable type"), + "COMMENT_ON_INSTR": _("Comment must be on its own line or at end of a line"), + "COMMA_START_LINE": _("Comma at line start"), + "MIXED_SPACE_TAB": 
_("Mixed spaces and tabs"), + "ATTR_EOL": _("Function attribute must be at the end of line"), + "INVALID_HEADER": _("Missing or invalid 42 header"), + "INCLUDE_MISSING_SP": _("Missing space between include and filename"), + "TYPE_NOT_GLOBAL": _("Enums, structs and unions need to be defined only in global scope"), + "FORBIDDEN_TYPEDEF": _("Typedef declaration are not allowed in .c files"), + "FORBIDDEN_STRUCT": _("Struct declaration are not allowed in .c files"), + "FORBIDDEN_UNION": _("Union declaration are not allowed in .c files"), + "FORBIDDEN_ENUM": _("Enum declaration are not allowed in .c files"), + "UNEXPECTED_EOF_CHR": _("Unexpected end of file (EOF) while parsing a char"), + "UNEXPECTED_EOL_CHR": _("Unexpected end of line (EOL) while parsing a char"), + "UNEXPECTED_EOF_MC": _("Unexpected end of file (EOF) while parsing a multiline comment"), + "UNEXPECTED_EOF_STR": _("Unexpected end of file (EOF) while parsing a string"), + "EMPTY_CHAR": _("Empty character constant"), + "CHAR_AS_STRING": _("Character constants can have only one character"), + "INVALID_SUFFIX": _("This suffix is invalid"), + "BAD_FLOAT_SUFFIX": _("Invalid suffix for float/double literal constant"), + "INVALID_BIN_INT": _("Invalid binary integer literal"), + "INVALID_OCT_INT": _("Invalid octal integer literal"), + "INVALID_HEX_INT": _("Invalid hexadecimal integer literal"), + "MAXIMAL_MUNCH": _("Potential maximal munch detected"), + "NO_HEX_DIGITS": _("No hexadecimal digits followed by the \\x"), + "UNKNOWN_ESCAPE": _("Unknown escape sequence"), + "BAD_EXPONENT": _("Exponent has no digits"), + "MULTIPLE_DOTS": _("Multiple dots in float constant"), + "MULTIPLE_X": _("Multiple 'x' in hexadecimal float constant"), } -class NormError: - def __init__(self, errno, line, col=None): - self.errno = errno - self.line = line - self.col = col - if col is not None: - self.error_pos = f"(line: {(str(self.line)).rjust(3)}, col: {(str(self.col)).rjust(3)}):\t" - else: - self.error_pos = f"(line: {(str(self.line)).rjust(3)}):\t " - self.prefix = f"\t{self.errno:<20} {self.error_pos:>21}" - self.error_msg = f"{errors.get(self.errno, 'ERROR NOT FOUND')}" - - def __str__(self): - return self.prefix + self.error_msg \ No newline at end of file diff --git a/norminette/registry.py b/norminette/registry.py index bf317a24..03cda22b 100644 --- a/norminette/registry.py +++ b/norminette/registry.py @@ -1,69 +1,63 @@ -import rules -from context import Context -from functools import cmp_to_key -from exceptions import CParsingError +import collections +from operator import attrgetter + +from norminette.rules import Rules, Primary +from norminette.exceptions import CParsingError + +rules = Rules() -def sort_errs(a, b): - if a.col == b.col and a.line == b.line: - return 1 if a.errno > b.errno else -1 - return a.col - b.col if a.line == b.line else a.line - b.line class Registry: - global has_err def __init__(self): - self.rules = rules.rules - self.primary_rules = rules.primary_rules - self.dependencies = {} - for k, r in self.rules.items(): - r.register(self) + self.dependencies = collections.defaultdict(list) + for rule in rules.checks: + rule.register(self) + for name, dependencies in self.dependencies.items(): + self.dependencies[name] = sorted(dependencies, reverse=True, key=attrgetter("__name__")) def run_rules(self, context, rule): - if rule.name.startswith("Is"): - ret, read = rule.run(context) - else: - #print (rule.name) - ret = False - read = 0 - rule.run(context) - #print(context.history, context.tokens[:5], rule) - #if 
rule.name.startswith("Is"): - #print (rule.name, ret) - if ret is True: + rule = rule(context) + result = rule.run(context) + ret, read = result if isinstance(rule, Primary) else (False, 0) + if ret: context.scope.instructions += 1 - if rule.name.startswith("Is"): - #print ("Line", context.tokens[0].pos[0], rule.name) + if isinstance(rule, Primary): context.tkn_scope = read - context.history.append(rule.name) - for r in self.dependencies.get(rule.name, []): - self.run_rules(context, self.rules[r]) - if 'all' in self.dependencies: - for r in self.dependencies['all']: - self.run_rules(context, self.rules[r]) - #context.history.pop(-1) + context.history.append(rule) + for rule in self.dependencies[rule.name]: + self.run_rules(context, rule) + for rule in self.dependencies["_rule"]: + self.run_rules(context, rule) context.tkn_scope = 0 return ret, read - def run(self, context, source): + def run(self, context): """ - Main function for each file. - Primary rules are determined by the prefix "Is" and - are run by order of priority as defined in each class - Each secondary rule is then run in arbitrary order based on their - dependencies + Main function for each file. + Primary rules are determined by the prefix "Is" and + are run by order of priority as defined in each class + Each secondary rule is then run in arbitrary order based on their + dependencies """ unrecognized_tkns = [] + context.state = "starting" + for rule in self.dependencies["_start"]: + self.run_rules(context, rule) + context.state = "running" while context.tokens != []: context.tkn_scope = len(context.tokens) - for rule in self.primary_rules: - if type(context.scope) not in rule.scope and rule.scope != []: + for rule in rules.primaries: + if rule.scope and context.scope not in rule.scope: continue ret, jump = self.run_rules(context, rule) if ret is True: if unrecognized_tkns != []: if context.debug == 0: - raise CParsingError(f"Unrecognized line {unrecognized_tkns[0].pos} while parsing line {unrecognized_tkns}") - print ('uncaught -> ', context.filename) - print ('uncaught -> ', unrecognized_tkns) + raise CParsingError( + f"Error: Unrecognized line {unrecognized_tkns[0].pos} while parsing line {unrecognized_tkns}" # noqa: E501 + ) + print("uncaught -> ", context.file.name) + print("uncaught -> ", unrecognized_tkns) unrecognized_tkns = [] context.dprint(rule.name, jump) context.update() @@ -71,18 +65,14 @@ def run(self, context, source): break # ############################################################# else: # Remove these one ALL primary rules are done - # print("#, ", context.tokens[0]) + # print("#, ", context.tokens[0]) unrecognized_tkns.append(context.tokens[0]) context.pop_tokens(1) # ################################## # ############################################################# + context.state = "ending" + for rule in self.dependencies["_end"]: + self.run_rules(context, rule) if unrecognized_tkns != []: - print (context.debug) + print(context.debug) if context.debug > 0: - print ("uncaught ->", unrecognized_tkns) - if context.errors == []: - print(context.filename + ": OK!") - else: - print(context.filename + ": KO!") - context.errors = sorted(context.errors, key=cmp_to_key(sort_errs)) - for err in context.errors: - print(err) + print("uncaught ->", unrecognized_tkns) diff --git a/norminette/rules/__init__.py b/norminette/rules/__init__.py index 2539de4a..73df2a78 100644 --- a/norminette/rules/__init__.py +++ b/norminette/rules/__init__.py @@ -1,34 +1,30 @@ import importlib import os -from .rule import Rule, 
PrimaryRule -from glob import glob -from functools import cmp_to_key +from operator import attrgetter +from norminette.rules.rule import Rule, Primary, Check -path = os.path.dirname(os.path.realpath(__file__)) -files = glob(path + "/check_*.py") -rules = {} -primary_rules = {} +class Rules: + __slots__ = ( + "all", + "primaries", + "checks", + ) -for f in files: - mod_name = f.split('/')[-1].split('.')[0] - class_name = "".join([s.capitalize() for s in mod_name.split('_')]) - module = importlib.import_module("rules." + mod_name) - rule = getattr(module, class_name) - rule = rule() - rules[class_name] = rule + __instance = None -files = glob(path + "/is_*.py") + def __new__(cls): + if not cls.__instance: + cls.__instance = super().__new__(cls) + return cls.__instance -for f in files: - mod_name = f.split('/')[-1].split('.')[0] - class_name = "".join([s.capitalize() for s in mod_name.split('_')]) - module = importlib.import_module("rules." + mod_name) - rule = getattr(module, class_name) - primary_rules[class_name] = rule() + def __init__(self) -> None: + path = os.path.dirname(os.path.realpath(__file__)) + for f in os.listdir(path): + name, _ = os.path.splitext(f) + importlib.import_module("norminette.rules." + name) - -primary_rules = [v for k, v in sorted( - primary_rules.items(), - key=lambda item: -item[1].priority)] + self.all = Rule.__subclasses__() + self.checks = Check.__subclasses__() + self.primaries = sorted(Primary.__subclasses__(), reverse=True, key=attrgetter("priority")) diff --git a/norminette/rules/check_assignation.py b/norminette/rules/check_assignation.py index 88d9f3dd..6ca344aa 100644 --- a/norminette/rules/check_assignation.py +++ b/norminette/rules/check_assignation.py @@ -1,5 +1,5 @@ -from rules import Rule -from scope import * +from norminette.rules import Rule, Check + assigns = [ "RIGHT_ASSIGN", "LEFT_ASSIGN", @@ -14,43 +14,77 @@ "ASSIGN", ] +special_assigns = ["INC", "DEC"] + -class CheckAssignation(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsAssignation"] +class CheckAssignation(Rule, Check): + depends_on = ( + "IsAssignation", + ) + + def check_brace_assign(self, context, i): + i += 1 + deep = 1 + while context.check_token(i, "RBRACE") is False and deep > 0: + i += 1 + return True, i - def check_assign_right(self, context, i): + def check_assign_right(self, context, i, mini_assign=False): tmp_typ = None start = 0 while context.check_token(i, "SEMI_COLON") is False: typ = None + if context.check_token(i, "LBRACE"): + ret, i = self.check_brace_assign(context, i) + if ret is False: + return True, i + break if context.check_token(i, "LPARENTHESIS") is True: start = i tmp_typ, i = context.parenthesis_contain(i) - if tmp_typ != None: + if tmp_typ is not None: typ = tmp_typ + if tmp_typ == "assign": + context.new_error("MULT_ASSIGN_LINE", context.peek_token(start)) if tmp_typ is None: tmp = start + 1 - while context.peek_token(tmp) and context.check_token(tmp, "RPARENTHESIS") is False: - if context.check_token(tmp, "COMMA") is True and typ is not None: + while ( + context.peek_token(tmp) + and context.check_token(tmp, "RPARENTHESIS") is False + ): + if ( + context.check_token(tmp, "COMMA") is True + and typ is not None + ): context.new_error("TOO_MANY_INSTR", context.peek_token(tmp)) tmp += 1 if context.check_token(i, assigns) is True: - context.new_error("MULT_ASSIGN_LINE", context.peek_token(i)) + if mini_assign is True: + mini_assign = False + else: + context.new_error("MULT_ASSIGN_LINE", context.peek_token(i)) + if 
context.check_token(i, special_assigns) is True: + if mini_assign is True: + context.new_error("MULT_ASSIGN_LINE", context.peek_token(i)) i += 1 return False, 0 def run(self, context): """ - Only one assignation at a time - Unless the variable is static (or global), you cannot assign its value when you declare it. + Only one assignation at a time + Unless the variable is static (or global), you cannot assign its value when you declare it. """ i = 0 assign_present = False + mini_assign = False while context.check_token(i, "SEMI_COLON") is False: - if context.check_token(i, assigns) is True and assign_present == False: + if ( + context.check_token(i, assigns + special_assigns) is True + and assign_present is False + ): assign_present = True - return self.check_assign_right(context, i + 1) + if context.check_token(i, special_assigns): + mini_assign = True + return self.check_assign_right(context, i + 1, mini_assign) i += 1 return False, 0 diff --git a/norminette/rules/check_assignation_indent.py b/norminette/rules/check_assignation_indent.py index b6d2ad3b..c5b55ca5 100644 --- a/norminette/rules/check_assignation_indent.py +++ b/norminette/rules/check_assignation_indent.py @@ -1,5 +1,7 @@ -from rules import Rule -from scope import * +from norminette.rules import Rule, Check +from norminette.exceptions import CParsingError + + operators = [ "RIGHT_ASSIGN", "LEFT_ASSIGN", @@ -39,18 +41,22 @@ nest_kw = ["RPARENTHESIS", "LPARENTHESIS", "NEWLINE"] -class CheckAssignationIndent(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsAssignation", "IsFuncPrototype", "IsFunctionCall"] + +class CheckAssignationIndent(Rule, Check): + depends_on = ( + "IsAssignation", + "IsFuncPrototype", + "IsFunctionCall", + "IsVarDeclaration", + ) def run(self, context): """ - Declared variables must be aligned using tabs with other variables on the same scope + Declared variables must be aligned using tabs with other variables on the same scope """ i = 0 expected = context.scope.indent - if context.history[-1] == "IsAssignation": + if context.history[-1] in ["IsAssignation", "IsVarDeclaration"]: nest = expected + 1 elif context.history[-1] == "IsFuncPrototype": nest = context.func_alignment @@ -61,18 +67,34 @@ def run(self, context): if context.check_token(i - 1, operators) is True: context.new_error("EOL_OPERATOR", context.peek_token(i)) tmp = context.skip_ws(i + 1) - if context.check_token(tmp, 'COMMA'): + if context.check_token(tmp, "COMMA"): context.new_error("COMMA_START_LINE", context.peek_token(i)) got = 0 i += 1 while context.check_token(i + got, "TAB") is True: got += 1 - if got > nest: + if context.peek_token(i + got) is None: + raise CParsingError( + f"Error: Unexpected EOF l.{context.peek_token(i - 1).pos[0]}" + ) + if context.check_token( + i + got, ["LBRACKET", "RBRACKET", "LBRACE", "RBRACE"] + ): + nest -= 1 + if got > nest or ( + got > nest + 1 + and context.history[-1] in ["IsAssignation", "IsVarDeclaration"] + ): context.new_error("TOO_MANY_TAB", context.peek_token(i)) - return True, i - elif got < nest: + elif got < nest or ( + got < nest - 1 + and context.history[-1] in ["IsAssignation", "IsVarDeclaration"] + ): context.new_error("TOO_FEW_TAB", context.peek_token(i)) - return True, i + if context.check_token( + i + got, ["LBRACKET", "RBRACKET", "LBRACE", "RBRACE"] + ): + nest += 1 if context.check_token(i, "LPARENTHESIS") is True: nest += 1 if context.check_token(i, "RPARENTHESIS") is True: diff --git a/norminette/rules/check_block_start.py 
b/norminette/rules/check_block_start.py index 1a7fc6ff..4db01941 100644 --- a/norminette/rules/check_block_start.py +++ b/norminette/rules/check_block_start.py @@ -1,17 +1,18 @@ -from rules import Rule -from scope import * +from norminette.rules import Rule, Check +from norminette.scope import GlobalScope, ControlStructure -class CheckBlockStart(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsBlockStart"] + +class CheckBlockStart(Rule, Check): + depends_on = ( + "IsBlockStart", + ) def run(self, context): """ - Braces signal that the control structure, function, or user defined type can contain - multiple lines. - A control structure that has no braces can only contain one instruction line, but can - contain multiple control structures + Braces signal that the control structure, function, or user defined type can contain + multiple lines. + A control structure that has no braces can only contain one instruction line, but can + contain multiple control structures """ outer = context.scope.get_outer() if len(context.history) > 2: @@ -23,13 +24,21 @@ def run(self, context): elif i == 3: hist_2 = item i += 1 - if type(context.scope) is GlobalScope and context.scope.tmp_scope is not None \ - and hist_1 == "IsFuncDeclaration" and hist_2 == "IsPreprocessorStatement": + if ( + type(context.scope) is GlobalScope + and context.scope.tmp_scope is not None + and hist_1 == "IsFuncDeclaration" + and hist_2 == "IsPreprocessorStatement" + ): context.scope.functions -= 1 context.scope = context.tmp_scope context.scope.multiline = True context.tmp_scope = None - if type(context.scope) == ControlStructure and outer is not None and type(outer) == ControlStructure: - if outer.multiline == False: + if ( + type(context.scope) is ControlStructure + and outer is not None + and type(outer) is ControlStructure + ): + if outer.multiline is False: context.new_error("MULT_IN_SINGLE_INSTR", context.peek_token(0)) - return False, 0 \ No newline at end of file + return False, 0 diff --git a/norminette/rules/check_brace.py b/norminette/rules/check_brace.py index 6d039869..df799f87 100644 --- a/norminette/rules/check_brace.py +++ b/norminette/rules/check_brace.py @@ -1,26 +1,28 @@ -from rules import Rule -from scope import * +from norminette.rules import Rule, Check -class CheckBrace(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsBlockStart", "IsBlockEnd"] +class CheckBrace(Rule, Check): + depends_on = ( + "IsBlockStart", + "IsBlockEnd", + ) def run(self, context): """ - C files must end with an empty line - Functions can only have 25 lines + C files must end with an empty line + Functions can only have 25 lines """ i = 0 i = context.skip_ws(i, nl=False) - #if context.check_token(i, ["RBRACE", "LBRACE"]) is False and context.scope.type != "GlobalScope": + # if context.check_token(i, ["RBRACE", "LBRACE"]) is False and context.scope.type != "GlobalScope": # context.new_error("BRACE_EMPTY_LINE") if context.check_token(i, ["RBRACE", "LBRACE"]) is False: context.new_error("EXPECTED_BRACE", context.peek_token(i)) return False, 0 i += 1 i = context.skip_ws(i, nl=False) + if context.check_token(i, "NEWLINE") is True and context.check_token(i - 1, ["SPACE", "TAB"]): + context.new_error("SPC_BEFORE_NL", context.peek_token(i - 1)) if context.check_token(i, "NEWLINE") is False or context.check_token(i, "NEWLINE") is None: if context.scope.name == "UserDefinedType" or context.scope.name == "UserDefinedEnum": i = context.skip_ws(i, nl=False) diff --git 
a/norminette/rules/check_comment.py b/norminette/rules/check_comment.py index d4ba0443..0d4a5c1b 100644 --- a/norminette/rules/check_comment.py +++ b/norminette/rules/check_comment.py @@ -1,33 +1,56 @@ -from rules import Rule -from scope import * +from norminette.rules import Rule, Check -allowed_on_comment = [ - "COMMENT", - "MULT_COMMENT", - "SPACE", - "TAB" -] - -class CheckComment(Rule): - def __init__(self): - super().__init__() - self.depends_on = [] +class CheckComment(Rule, Check): def run(self, context): """ - Comments are only allowed in GlobalScope. + Comments are forbidden inside functions and in the middle of instructions. """ i = context.skip_ws(0) - has_comment = False - while context.peek_token(i) is not None and context.check_token(i, "NEWLINE") is False: - if context.check_token(i, allowed_on_comment) is False: - if has_comment == True: - context.new_error("COMMENT_ON_INSTR", context.peek_token(i)) - return True, i - elif context.check_token(i, ['COMMENT', 'MULT_COMMENT']) is True: - if context.scope.name != "GlobalScope" or context.history[-1] == 'IsFuncDeclaration': - context.new_error("WRONG_SCOPE_COMMENT", context.peek_token(i)) - has_comment = True + + tokens = [] + while context.peek_token(i) and not context.check_token(i, "NEWLINE"): + token = context.peek_token(i) + tokens.append(token) i += 1 - i = context.skip_ws(0) - return False, 0 + + for index, token in enumerate(tokens): + if token.type in ("COMMENT", "MULT_COMMENT"): + if self.is_inside_a_function(context): + context.new_error("WRONG_SCOPE_COMMENT", token) + if index == 0 or self.is_last_token(token, tokens[index+1:]): + continue + context.new_error("COMMENT_ON_INSTR", token) + + def is_inside_a_function(self, context): + if context.history[-2:] == ["IsFuncDeclaration", "IsBlockStart"]: + return True + if context.scope.__class__.__name__.lower() == "function": + return True + # Sometimes the context scope is a `ControlStructure` scope instead of + # `Function` scope, so, to work around this bug, we need to check + # `context.history` manually. + last = None + for index, record in enumerate(reversed(context.history)): + if record == "IsFuncDeclaration" and last == "IsBlockStart": + # Given the limited history API, we can't say if we're in a + # nested function to reach the first enclosing function, so, + # we'll consider that the user just declared a normal function + # in global scope.
+ stack = 1 + index -= 1 # Jumps to next record after `IsBlockStart` + while index > 0 and stack > 0: + record = context.history[-index] + index -= 1 + if record not in ("IsBlockStart", "IsBlockEnd"): + continue + stack = stack + (1, -1)[record == "IsBlockEnd"] + return bool(stack) + last = record + return False + + def is_last_token(self, token, foward): + expected = ("SPACE", "TAB") + if token.type == "MULT_COMMENT": + expected += ("COMMENT", "MULT_COMMENT") + return all(it.type in ("SPACE", "TAB", "COMMENT", "MULT_COMMENT") for it in foward) diff --git a/norminette/rules/check_comment_line_len.py b/norminette/rules/check_comment_line_len.py index f58a1a9f..9a4ebb9a 100644 --- a/norminette/rules/check_comment_line_len.py +++ b/norminette/rules/check_comment_line_len.py @@ -1,23 +1,28 @@ -from rules import Rule -from lexer import Lexer, TokenError -from scope import * +from norminette.rules import Rule, Check -class CheckCommentLineLen(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsComment"] + +class CheckCommentLineLen(Rule, Check): + depends_on = ("IsComment",) def run(self, context): """ - Lines must not be over 80 characters long + Lines must not be over 80 characters long """ i = 0 - while context.check_token(i, ["COMMENT", "MULT_COMMENT"]) is False: + while not context.check_token(i, ["COMMENT", "MULT_COMMENT"]): i += 1 - val = context.peek_token(i).value - line_start = context.peek_token(0).pos[1] - val = val.split('\n') - for item in val: - if len(item) + line_start > 81: - context.new_error("LINE_TOO_LONG", context.peek_token(0)) - line_start = 0 + token = context.peek_token(i) + if not token: + return + index = token.pos[1] + if token.type == "MULT_COMMENT": + lines = token.value.split("\n") + # We need to add a padding to the first line because the comment + # can be at the end of a line. 
+ lines[0] = " " * index + lines[0] + for lineno, line in enumerate(lines, start=token.pos[0]): + if len(line) > 81: + token.pos = (lineno, 1) + context.new_error("LINE_TOO_LONG", token) + elif index + len(token.value) > 81: # token.type == "COMMENT" + context.new_error("LINE_TOO_LONG", token) diff --git a/norminette/rules/check_control_statement.py b/norminette/rules/check_control_statement.py index ae6749ad..3cdca8ca 100644 --- a/norminette/rules/check_control_statement.py +++ b/norminette/rules/check_control_statement.py @@ -1,12 +1,7 @@ -from rules import Rule -from scope import * +from norminette.rules import Rule, Check -forbidden_cs = [ - "FOR", - "SWITCH", - "CASE", - "GOTO" -] + +forbidden_cs = ["FOR", "SWITCH", "CASE", "GOTO"] assigns = [ "RIGHT_ASSIGN", "LEFT_ASSIGN", @@ -21,28 +16,37 @@ "ASSIGN", ] -class CheckControlStatement(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsControlStatement"] + +class CheckControlStatement(Rule, Check): + depends_on = ( + "IsControlStatement", + ) def check_nest(self, context, i): - while context.check_token(i, "RPARENTHESIS") is False: + depth = 1 + i += 1 + while depth > 0: + if context.check_token(i, "LPARENTHESIS") is True: + depth += 1 + if context.check_token(i, "RPARENTHESIS") is True: + depth -= 1 if context.check_token(i, assigns) is True: context.new_error("ASSIGN_IN_CONTROL", context.peek_token(i)) return -1 if context.check_token(i, forbidden_cs) is True: context.new_error("FORBIDDEN_CS", context.peek_token(i)) + if context.check_token(i, "NEWLINE") is True and depth < 1: + return i += 1 return def run(self, context): """ - Forbidden control structures: - - For - - Switch case - - Goto - Assignations must be done outside of control structures + Forbidden control structures: + - For + - Switch case + - Goto + Assignations must be done outside of control structures """ i = 0 if context.scope.name == "GlobalScope": diff --git a/norminette/rules/check_declaration.py b/norminette/rules/check_declaration.py index cc505dc7..4f69a19e 100644 --- a/norminette/rules/check_declaration.py +++ b/norminette/rules/check_declaration.py @@ -1,18 +1,18 @@ -from rules import Rule -from scope import * +from norminette.rules import Rule, Check -class CheckDeclaration(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsDeclaration"] + +class CheckDeclaration(Rule, Check): + depends_on = ( + "IsDeclaration", + ) def run(self, context): """ - Checks for nl in declarations + Checks for nl in declarations """ - #i = context.skip_ws(0) - #while context.peek_token(i) and context.check_token(i, "SEMI_COLON") is False: - #if context.check_token(i, "NEWLINE") is True: - #context.new_error("NEWLINE_IN_DECL", context.peek_token(i)) - #i += 1 + # i = context.skip_ws(0) + # while context.peek_token(i) and context.check_token(i, "SEMI_COLON") is False: + # if context.check_token(i, "NEWLINE") is True: + # context.new_error("NEWLINE_IN_DECL", context.peek_token(i)) + # i += 1 return False, 0 diff --git a/norminette/rules/check_empty_line.py b/norminette/rules/check_empty_line.py index 6a6be0aa..b5032dcc 100644 --- a/norminette/rules/check_empty_line.py +++ b/norminette/rules/check_empty_line.py @@ -1,43 +1,54 @@ -from rules import Rule -from scope import * +from norminette.rules import Rule, Check - -class CheckEmptyLine(Rule): - def __init__(self): - super().__init__() - self.depends_on = [] - +class CheckEmptyLine(Rule, Check): def run(self, context): """ - Empty line must not contains tabs or spaces - You cannot have 2 
empty lines in a row - Your variable declarations must be followed by an empty line - No other empty lines are allowed in functions - You must have an empty between two functions + Empty line must not contain tabs or spaces + You cannot have 2 empty lines in a row + Your variable declarations must be followed by an empty line + No other empty lines are allowed in functions + You must have an empty line between two functions """ i = 0 if len(context.history) == 1 and context.history[-1] == "IsEmptyLine": context.new_error("EMPTY_LINE_FILE_START", context.peek_token(i)) return False, 0 - if context.scope.name != "GlobalScope" and context.history[-1] != "IsBlockStart": - if context.history[-1] != "IsVarDeclaration" and context.scope.vdeclarations_allowed == True: + if context.scope.name != "GlobalScope": + if ( + context.history[-1] != "IsVarDeclaration" + and context.scope.vdeclarations_allowed is True + ): context.scope.vdeclarations_allowed = False if context.history[-1] not in ["IsEmptyLine", "IsComment"]: - if context.history[-1] == "IsBlockEnd" and context.scope.name == "Function": + if ( + context.history[-1] == "IsBlockEnd" + and context.scope.name == "Function" + ): pass else: context.new_error("NL_AFTER_VAR_DECL", context.peek_token(i)) return True, i + if ( + len(context.history) > 1 + and context.history[-2] == "IsPreprocessorStatement" + and context.history[-1] != "IsPreprocessorStatement" + and context.history[-1] != "IsEmptyLine" + and context.history[-1] != "IsComment" + ): + context.new_error("NL_AFTER_PREPROC", context.peek_token(i)) if context.history[-1] != "IsEmptyLine": return False, 0 if context.check_token(i, "NEWLINE") is False: context.new_error("SPACE_EMPTY_LINE", context.peek_token(i)) if context.history[-2] == "IsEmptyLine": context.new_error("CONSECUTIVE_NEWLINES", context.peek_token(i)) - if context.history[-2] != "IsVarDeclaration" and context.scope.name != "GlobalScope": - context.new_error("EMPTY_LINE_FUNCTION", context.peek_token(i)) - if context.peek_token(i + 1) is None: + if ( + context.history[-2] != "IsVarDeclaration" + and context.scope.name != "GlobalScope" + ): + context.new_error("EMPTY_LINE_FUNCTION", context.peek_token(0)) + if context.check_token(i, "NEWLINE") and context.peek_token(i + 1) is None: context.new_error("EMPTY_LINE_EOF", context.peek_token(i)) return False, 0 diff --git a/norminette/rules/check_enum_var_decl.py b/norminette/rules/check_enum_var_decl.py index 4cdec2ab..cd490df4 100644 --- a/norminette/rules/check_enum_var_decl.py +++ b/norminette/rules/check_enum_var_decl.py @@ -1,14 +1,14 @@ -from rules import Rule -from scope import * +from norminette.rules import Rule, Check -class CheckEnumVarDecl(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsEnumVarDecl"] + +class CheckEnumVarDecl(Rule, Check): + depends_on = ( + "IsEnumVarDecl", + ) def run(self, context): """ - Checks for nl in declarations + Checks for nl in declarations """ i = context.skip_ws(0) while context.peek_token(i) and context.check_token(i, "COMMA") is False: @@ -18,7 +18,7 @@ def run(self, context): if context.check_token(i, "LBRACE") is True: return False, 0 i += 1 - #context.new_error("NEWLINE_IN_DECL", context.peek_token(i)) + # context.new_error("NEWLINE_IN_DECL", context.peek_token(i)) return True, i i += 1 return False, 0 diff --git a/norminette/rules/check_expression_statement.py b/norminette/rules/check_expression_statement.py index 5a682043..c150f4f5 100644 --- a/norminette/rules/check_expression_statement.py +++
b/norminette/rules/check_expression_statement.py @@ -1,5 +1,4 @@ -from rules import Rule -from scope import * +from norminette.rules import Rule, Check kw = [ # C reserved keywords # @@ -33,39 +32,52 @@ "UNSIGNED", "VOID", "VOLATILE", - "WHILE" + "WHILE", ] -class CheckExpressionStatement(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsExpressionStatement", "IsControlStatement", "IsFunctionCall", "IsAssignation"] + +class CheckExpressionStatement(Rule, Check): + depends_on = ( + "IsExpressionStatement", + "IsControlStatement", + "IsFunctionCall", + "IsAssignation", + "IsCast", + ) def run(self, context): """ - C keywords (return, break, continue...) must be followed by a space, with the - exception of sizeof - Return values in a function must be contained in parenthesis + C keywords (return, break, continue...) must be followed by a space, with the + exception of sizeof + Return values in a function must be contained in parenthesis """ i = 0 - parenthesis = False while context.check_token(i, ["SEMI_COLON", "NEWLINE"]) is False: if context.check_token(i, kw) is True: - if context.check_token(i + 1, ["SPACE", "NEWLINE", "RPARENTHESIS"]) is False: + if ( + context.check_token( + i + 1, + ["SPACE", "NEWLINE", "RPARENTHESIS", "COMMENT", "MULT_COMMENT"], + ) + is False + ): context.new_error("SPACE_AFTER_KW", context.peek_token(i)) - return False, 0 + if context.check_token(i, ["MULT", "BWISE_AND"]) is True and i > 0: + if context.check_token(i - 1, "IDENTIFIER") is True: + context.new_error("SPACE_AFTER_KW", context.peek_token(i - 1)) if context.check_token(i, "RETURN") is True: tmp = i + 1 tmp = context.skip_ws(tmp) - if context.check_token(tmp, "SEMI_COLON") is True: - return False, 0 - if context.check_token(tmp, "SEMI_COLON") is False and context.check_token(tmp, "LPARENTHESIS") is False: + if ( + context.check_token(tmp, "SEMI_COLON") is False + and context.check_token(tmp, "LPARENTHESIS") is False + ): context.new_error("RETURN_PARENTHESIS", context.peek_token(tmp)) return False, 0 - else: + elif context.check_token(tmp, "SEMI_COLON") is False: tmp = context.skip_nest(tmp) + 1 if context.check_token(tmp, "SEMI_COLON") is False: context.new_error("RETURN_PARENTHESIS", context.peek_token(tmp)) - return False, 0 + return False, 0 i += 1 return False, 0 diff --git a/norminette/rules/check_func_arguments_name.py b/norminette/rules/check_func_arguments_name.py index f1a5c8b2..f589e48e 100644 --- a/norminette/rules/check_func_arguments_name.py +++ b/norminette/rules/check_func_arguments_name.py @@ -1,51 +1,23 @@ -from lexer import Token -from rules import Rule +from norminette.rules import Rule, Check -type_specifiers = [ - "CHAR", - "DOUBLE", - "ENUM", - "FLOAT", - "INT", - "UNION", - "VOID", - "SHORT" -] +type_specifiers = ["CHAR", "DOUBLE", "ENUM", "FLOAT", "INT", "UNION", "VOID", "SHORT"] -misc_specifiers = [ - "CONST", - "REGISTER", - "STATIC", - "STRUCT", - "VOLATILE" -] +misc_specifiers = ["CONST", "REGISTER", "STATIC", "STRUCT", "VOLATILE"] -size_specifiers = [ - "LONG", - "SHORT" -] +size_specifiers = ["LONG", "SHORT"] -sign_specifiers = [ - "SIGNED", - "UNSIGNED" -] +sign_specifiers = ["SIGNED", "UNSIGNED"] -whitespaces = [ - "SPACE", - "TAB", - "NEWLINE" -] +whitespaces = ["SPACE", "TAB", "NEWLINE"] -arg_separator = [ - "COMMA", - "CLOSING_PARENTHESIS" -] +arg_separator = ["COMMA", "CLOSING_PARENTHESIS"] -class CheckFuncArgumentsName(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsFuncDeclaration", "IsFuncPrototype"] +class 
CheckFuncArgumentsName(Rule, Check): + depends_on = ( + "IsFuncDeclaration", + "IsFuncPrototype", + ) def check_arg_format(self, context, pos): """ @@ -58,21 +30,30 @@ def check_arg_format(self, context, pos): p = 0 stop = ["COMMA", "RPARENTHESIS"] if context.check_token(i, ["COMMENT", "MULT_COMMENT"]): - context.new_error("WRONG_SCOPE_COMMENT", context.peek_token(i)) + # context.new_error("WRONG_SCOPE_COMMENT", context.peek_token(i)) i += 1 - #if context.check_token(i, "NEWLINE"): - #context.new_error("NEWLINE_IN_DECL", context.peek_token(i)) - #i += 1 + # if context.check_token(i, "NEWLINE"): + # context.new_error("NEWLINE_IN_DECL", context.peek_token(i)) + # i += 1 if context.check_token(i, "ELLIPSIS"): i += 1 if context.peek_token(i).type in stop: i += 1 return i ret, i = context.check_type_specifier(i) - if ret == False: + has_tab = False + while context.check_token(i, ["SPACE", "TAB"]): + if context.check_token(i, "TAB") is True and has_tab is False: + context.new_error("TAB_INSTEAD_SPC", context.peek_token(i)) + has_tab = True + i += 1 + + if ret is False: context.new_error("ARG_TYPE_UKN", context.peek_token(i)) return -1 - while context.peek_token(i) is not None and context.check_token(i, ["LPARENTHESIS"] + whitespaces): + while context.peek_token(i) is not None and context.check_token( + i, ["LPARENTHESIS"] + whitespaces + ): if context.check_token(i, "LPARENTHESIS") is True: p += 1 if context.check_token(i, "RPARENTHESIS") is True: @@ -90,18 +71,19 @@ def check_arg_format(self, context, pos): while context.peek_token(i) is not None and i < context.arg_pos[1]: if context.check_token(i, stop) is True: if context.check_token(i, "RPARENTHESIS") is True and p > 0: - p -= 1 else: break - if context.check_token(i, 'LPARENTHESIS'): + if context.check_token(i, "LPARENTHESIS"): i = context.skip_nest(i) i += 1 i += 1 else: - while context.peek_token(i) is not None \ - and context.peek_token(i).type not in stop: + while ( + context.peek_token(i) is not None + and context.peek_token(i).type not in stop + ): i += 1 i += 1 return i @@ -120,28 +102,33 @@ def no_arg_func(self, context, pos): def run(self, context): """ - Empty functions arguments must use void + Empty functions arguments must use void """ i = context.arg_pos[0] + 1 ret = self.no_arg_func(context, i) if ret is True: return False, 0 while i < context.arg_pos[1]: - if context.check_token(i, 'NEWLINE'): + i = context.skip_ws(i) + if context.check_token(i, "NEWLINE"): i += 1 continue - if context.check_token(i, 'LPARENTHESIS'): + if context.check_token(i, "LPARENTHESIS"): p = 1 while p: if context.peek_token(i) is not None: - if context.check_token(i, 'LPARENTHESIS'): + if context.check_token(i, "LPARENTHESIS"): p += 1 - elif context.check_token(i, 'RPARENTHESIS'): + elif context.check_token(i, "RPARENTHESIS"): p -= 1 else: break i += 1 - else: + elif context.check_token(i, "LBRACKET") is True: + i = context.skip_nest(i) + 1 + elif context.check_token(i, "COMMA") is True: + i += 1 + elif context.check_token(i, "RPARENTHESIS") is not True: i = self.check_arg_format(context, i) if i == -1: return False, 0 diff --git a/norminette/rules/check_func_declaration.py b/norminette/rules/check_func_declaration.py index 45b538fc..1fa62083 100644 --- a/norminette/rules/check_func_declaration.py +++ b/norminette/rules/check_func_declaration.py @@ -1,56 +1,75 @@ -from rules import Rule -from scope import * +from norminette.rules import Rule, Check -types = [ - "INT", - "FLOAT", - "CHAR", - "DOUBLE", - "LONG", - "SHORT" -] +types = ["INT", "FLOAT", 
"CHAR", "DOUBLE", "LONG", "SHORT"] -class CheckFuncDeclaration(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsFuncDeclaration", "IsFuncPrototype"] + +class CheckFuncDeclaration(Rule, Check): + depends_on = ( + "IsFuncDeclaration", + "IsFuncPrototype", + "IsUserDefinedType", + ) def run(self, context): """ - Maximum 4 arguments in a function - Function declaration must be preceded by a newline + Maximum 4 arguments in a function + Function declaration must be preceded by a newline """ + # pdb.set_trace() i = 0 tmp = 0 + start = 0 arg = 1 while context.check_token(tmp, ["SEMI_COLON", "NEWLINE"]) is False: if context.check_token(tmp, "LBRACE") is True: context.new_error("BRACE_NEWLINE", context.peek_token(tmp)) tmp += 1 - #if tmp < context.tkn_scope - 2: - #context.new_error("NEWLINE_IN_DECL", context.peek_token(tmp)) - #this is a func declaration - if context.check_token(tmp, "SEMI_COLON") is False: - if len(context.history) > 1 and context.history[-2] != "IsEmptyLine" and context.history[-2] != "IsPreprocessorStatement" and context.history[-1] == 'IsFuncDeclaration': - context.new_error("NEWLINE_PRECEDES_FUNC", context.peek_token(i)) - #this is a func prototype + if context.history[-1] == "IsUserDefinedType": + return + # if tmp < context.tkn_scope - 2: + # context.new_error("NEWLINE_IN_DECL", context.peek_token(tmp)) + # this is a func declaration + if context.history[-1] == "IsFuncDeclaration": + # if context.check_token(tmp, "SEMI_COLON") is False: + i = 2 + length = len(context.history) + while length - i >= 0 and ( + context.history[-i] == "IsPreprocessorStatement" + or context.history[-i] == "IsComment" + or context.history[-i] == "IsFuncDeclaration" + ): + i += 1 + if length - i > 0 and context.history[-i] != "IsEmptyLine": + context.new_error("NEWLINE_PRECEDES_FUNC", context.peek_token(start)) i = context.fname_pos + 1 - while (context.check_token(i, ["RPARENTHESIS", "SPACE", "TAB"])) is True: + while ( + context.check_token(i, ["RPARENTHESIS"]) + ) is True: # , "SPACE", "TAB"])) is True: i += 1 if context.check_token(i, "LPARENTHESIS") is False: context.new_error("EXP_PARENTHESIS", context.peek_token(i)) + i = context.skip_ws(i) i += 1 - while context.check_token(i, "RPARENTHESIS") is False: - if context.check_token(i, "COMMA"): + deep = 1 + while deep > 0 and context.peek_token(i) is not None: + if context.check_token(i, "LPARENTHESIS"): + i = context.skip_nest(i) + elif context.check_token(i, "RPARENTHESIS"): + deep -= 1 + elif context.check_token(i, "COMMA"): arg += 1 i += 1 if context.check_token(i - 1, ["SPACE", "TAB"]) is True: tmp = i - 1 - while context.check_token(tmp, ['SPACE', 'TAB']) is True: + while context.check_token(tmp, ["SPACE", "TAB"]) is True: tmp -= 1 - if context.check_token(tmp, 'NEWLINE') is False: + if context.check_token(tmp, "NEWLINE") is False: context.new_error("NO_SPC_BFR_PAR", context.peek_token(i)) if arg > 4: context.new_error("TOO_MANY_ARGS", context.peek_token(i)) arg = [] + while context.check_token(i, ["NEWLINE", "SEMI_COLON"]) is False: + i += 1 + if context.check_token(i - 1, ["TAB", "SPACE"]): + context.new_error("SPC_BEFORE_NL", context.peek_token(i)) return False, 0 diff --git a/norminette/rules/check_func_spacing.py b/norminette/rules/check_func_spacing.py index 5f7effa7..2302377b 100644 --- a/norminette/rules/check_func_spacing.py +++ b/norminette/rules/check_func_spacing.py @@ -1,55 +1,33 @@ -from rules import Rule +from norminette.rules import Rule, Check -whitespaces = [ - "SPACE", - "TAB", - "NEWLINE" -] 
+whitespaces = ["SPACE", "TAB", "NEWLINE"] -type_specifiers = [ - "CHAR", - "DOUBLE", - "ENUM", - "FLOAT", - "INT", - "UNION", - "VOID", - "SHORT" -] +type_specifiers = ["CHAR", "DOUBLE", "ENUM", "FLOAT", "INT", "UNION", "VOID", "SHORT"] -misc_specifiers = [ - "CONST", - "REGISTER", - "STATIC", - "STRUCT", - "VOLATILE" -] +misc_specifiers = ["CONST", "REGISTER", "STATIC", "STRUCT", "VOLATILE"] -size_specifiers = [ - "LONG", - "SHORT" -] +size_specifiers = ["LONG", "SHORT"] -sign_specifiers = [ - "SIGNED", - "UNSIGNED" -] +sign_specifiers = ["SIGNED", "UNSIGNED"] -arg_separator = [ - "COMMA", - "CLOSING_PARENTHESIS" -] +arg_separator = ["COMMA", "CLOSING_PARENTHESIS"] -class CheckFuncSpacing(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsFuncDeclaration"] +class CheckFuncSpacing(Rule, Check): + depends_on = ( + "IsFuncDeclaration", + ) def run(self, context): """ - Function return type and function name must be separated by a tab + Function return type and function name must be separated by a tab """ + i = 0 + while i < context.fname_pos: + if context.check_token(i, "IDENTIFIER") is True and context.peek_token(i).value == "__attribute__": + # context.new_error("ATTR_EOL", context.peek_token(i)) + break + i += 1 i = context.fname_pos - 1 while context.check_token(i, ["MULT", "BWISE_AND", "LPARENTHESIS"]) is True: i -= 1 diff --git a/norminette/rules/check_functions_count.py b/norminette/rules/check_functions_count.py index 1ec65fca..232b179b 100644 --- a/norminette/rules/check_functions_count.py +++ b/norminette/rules/check_functions_count.py @@ -1,18 +1,16 @@ -from lexer import Token -from rules import Rule -import string +from norminette.rules import Rule, Check -class CheckFunctionsCount(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsFuncDeclaration"] +class CheckFunctionsCount(Rule, Check): + depends_on = ( + "IsFuncDeclaration", + ) def run(self, context): """ - Each file cannot contain more than 5 function + Each file cannot contain more than 5 functions """ - if context.scope != None and context.scope.name == "GlobalScope": + if context.scope is not None and context.scope.name == "GlobalScope": if context.scope.functions > 5: context.new_error("TOO_MANY_FUNCS", context.peek_token(0)) - return False, 0 \ No newline at end of file + return False, 0 diff --git a/norminette/rules/check_general_spacing.py b/norminette/rules/check_general_spacing.py index afe70340..7ae40f4d 100644 --- a/norminette/rules/check_general_spacing.py +++ b/norminette/rules/check_general_spacing.py @@ -1,20 +1,18 @@ -from rules import Rule -from scope import * +from norminette.rules import Rule, Check -class CheckGeneralSpacing(Rule): - def __init__(self): - super().__init__() - self.depends_on = [ - "IsDeclaration", - "IsControlStatement", - "IsExpressionStatement", - "IsAssignation", - "IsFunctionCall", - ] + +class CheckGeneralSpacing(Rule, Check): + depends_on = ( + "IsDeclaration", + "IsControlStatement", + "IsExpressionStatement", + "IsAssignation", + "IsFunctionCall", + ) def run(self, context): """ - Checks for tab/space consistency + Checks for tab/space consistency """ if context.scope.name == "UserDefinedType": return False, 0 @@ -23,7 +21,7 @@ def run(self, context): if context.check_token(i, "TAB") is True: context.new_error("TAB_INSTEAD_SPC", context.peek_token(i)) break - if context.check_token(i, ["NEWLINE","ESCAPED_NEWLINE"]) is True: + if context.check_token(i, ["NEWLINE", "ESCAPED_NEWLINE"]) is True: i = context.skip_ws(i + 1, nl=True) i += 1
return False, 0 diff --git a/norminette/rules/check_global_naming.py b/norminette/rules/check_global_naming.py index c6fa3b44..b5ea6844 100644 --- a/norminette/rules/check_global_naming.py +++ b/norminette/rules/check_global_naming.py @@ -1,5 +1,4 @@ -from rules import Rule -from lexer import Lexer, TokenError +from norminette.rules import Rule, Check types = [ "INT", @@ -19,26 +18,32 @@ "VOLATILE", "EXTERN", "SPACE", - "TAB" + "TAB", ] -class CheckGlobalNaming(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsVarDeclaration"] + +class CheckGlobalNaming(Rule, Check): + depends_on = ( + "IsVarDeclaration", + ) def run(self, context): """ - Global variable names must be preceded by g_ + Global variable names must be preceded by g_ """ i = 0 - last_id = "" if context.scope.name != "GlobalScope": return False, 0 i = context.skip_ws(i) _, i = context.check_type_specifier(i) + i = context.skip_ws(i) while context.check_token(i, "IDENTIFIER") is False: i += 1 - if context.peek_token(i).value.startswith("g_") is False: + if ( + context.peek_token(i) is not None + and context.peek_token(i).value != "environ" + ): + context.new_warning("GLOBAL_VAR_DETECTED", context.peek_token(0)) + if context.peek_token(i).value.startswith("g_") is False: context.new_error("GLOBAL_VAR_NAMING", context.peek_token(i)) return False, i diff --git a/norminette/rules/check_header.py b/norminette/rules/check_header.py new file mode 100644 index 00000000..85148060 --- /dev/null +++ b/norminette/rules/check_header.py @@ -0,0 +1,44 @@ +from norminette.rules import Rule, Check +import re + + +class CheckHeader(Rule, Check): + def parse_header(self, context): + if context.check_token(0, "MULT_COMMENT") is False: + context.new_error("INVALID_HEADER", context.peek_token(0)) + context.header_parsed = True + return + context.header += context.peek_token(0).value + "\n" + + def check_header(self, context): + # val = r"\/\* \*{74} \*\/\n\/\*.*\*\/\n\/\*.*\*\/\n\/\*.{3}([^ ]*).*\*\/\n\/\*.*\*\/\n\/\* By: ([^ ]*).*\*\/\n\/\*.*\*\/\n\/\* Created: ([^ ]* [^ ]*) by ([^ ]*).*\*\/\n\/\* Updated: ([^ ]* [^ ]*) by ([^ ]*).*\*\/\n\/\*.*\*\/\n\/\* \*{74} \*\/\n" # noqa: E501 + val_no_check_nl = r"\/\* \*{74} \*\/.\/\*.*\*\/.\/\*.*\*\/.\/\*.{3}([^ ]*).*\*\/.\/\*.*\*\/.\/\* By: ([^ ]*).*\*\/.\/\*.*\*\/.\/\* Created: ([^ ]* [^ ]*) by ([^ ]*).*\*\/.\/\* Updated: ([^ ]* [^ ]*) by ([^ ]*).*\*\/.\/\*.*\*\/.\/\* \*{74} \*\/." # noqa: E501 + + # correct_header = re.match(val, context.header) + regex = re.compile(val_no_check_nl, re.DOTALL) + # correct_header_no_nl = re.match(val_no_check_nl, context.header) + correct_header_no_nl = regex.search(context.header) + if correct_header_no_nl is None: + context.new_error("INVALID_HEADER", context.peek_token(0)) + # else: + # print (correct_header.group(1,2,3,4,5,6)) + + def run(self, context): + """ + Header checking. Just a warning for now. 
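The reworked global-naming rule above now distinguishes three cases: `environ` is the only unprefixed global that passes silently, any other global raises the `GLOBAL_VAR_DETECTED` warning, and a name without the `g_` prefix additionally raises `GLOBAL_VAR_NAMING`. A standalone restatement of that decision on a bare identifier string, with an invented helper name used only for illustration:

```python
def classify_global(name):
    """Return the diagnostics CheckGlobalNaming would raise for a global
    variable with this name (illustrative helper, not part of the rule)."""
    diagnostics = []
    if name == "environ":  # the only tolerated unprefixed global
        return diagnostics
    diagnostics.append("GLOBAL_VAR_DETECTED")  # every other global warns
    if not name.startswith("g_"):  # and must also carry the g_ prefix
        diagnostics.append("GLOBAL_VAR_NAMING")
    return diagnostics


assert classify_global("environ") == []
assert classify_global("g_env") == ["GLOBAL_VAR_DETECTED"]
assert classify_global("env") == ["GLOBAL_VAR_DETECTED", "GLOBAL_VAR_NAMING"]
```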
Does not trigger moulinette error + """ + if context.header_parsed is True: + return False, 0 + elif context.history[-1] == "IsComment" and context.header_parsed is False: + self.parse_header(context) + context.header_started = True + elif context.history[-1] != "IsComment" and context.header_started is True: + self.check_header(context) + context.header_parsed = True + elif ( + context.header_started is False + and context.header_parsed is False + and context.history[-1] != "IsComment" + ): + context.new_error("INVALID_HEADER", context.peek_token(0)) + context.header_parsed = True diff --git a/norminette/rules/check_identifier_name.py b/norminette/rules/check_identifier_name.py index 2adaf057..3dbcd89f 100644 --- a/norminette/rules/check_identifier_name.py +++ b/norminette/rules/check_identifier_name.py @@ -1,58 +1,35 @@ -from lexer import Token -from rules import Rule import string -from scope import * +from norminette.rules import Rule, Check +from norminette.scope import GlobalScope, UserDefinedType -assigns = [ - 'ASSIGN' -] -class CheckIdentifierName(Rule): - def __init__(self): - super().__init__() - self.depends_on = [] +assigns = ["ASSIGN"] + +class CheckIdentifierName(Rule, Check): def run(self, context): """ - Function can only be declared in the global scope - User defined identifiers can only contain lowercase characters, '_' or digits + Function can only be declared in the global scope + User defined identifiers can only contain lowercase characters, '_' or digits """ - i = 0 - legal_characters = string.ascii_lowercase + string.digits + '_' - legal_cap_characters = string.ascii_uppercase + string.digits + '_' - if context.history[-1] == "IsFuncDeclaration" or context.history[-1] == "IsFuncPrototype": + legal_characters = string.ascii_lowercase + string.digits + "_" + if context.history[-1] == "IsFuncDeclaration": sc = context.scope if type(sc) is not GlobalScope and type(sc) is not UserDefinedType: context.new_error("WRONG_SCOPE_FCT", context.peek_token(0)) - #while type(sc) != GlobalScope: - #sc = sc.outer() - #for c in sc.fnames[-1]: - #if c not in legal_characters: - #context.new_error( - #"FORBIDDEN_CHAR_NAME", - #context.peek_token(context.fname_pos)) - #break - #passed_assign = False - #err = None - #hist = context.history[-1] - #while i < context.tkn_scope and context.peek_token(i) is not None: - #if context.check_token(i, assigns) is True: - #passed_assign = True - #if context.check_token(i, "IDENTIFIER") and hist not in ['IsFuncDeclaration', 'IsFuncPrototype']: - #for c in context.peek_token(i).value: - #if c not in legal_characters: - #err = ("FORBIDDEN_CHAR_NAME", context.peek_token(i)) - #break - #if err is not None and hist not in ['IsFuncDeclaration', 'IsFuncPrototype'] or (hist == 'IsVariable' and passed_assign == True): - #for c in context.peek_token(i).value: - #if c not in legal_cap_characters: - #err = ("FORBIDDEN_CHAR_NAME", context.peek_token(i)) - #break - #else: - #err = None - #if err is not None: - #context.new_error(err[0], err[1]) - #break - #i += 1 + while type(sc) is not GlobalScope: + sc = sc.outer() + for c in sc.fnames[-1]: + if c not in legal_characters: + context.new_error( + "FORBIDDEN_CHAR_NAME", context.peek_token(context.fname_pos) + ) + if len(context.scope.vars_name) > 0: + for val in context.scope.vars_name[::]: + for c in val.value: + if c not in legal_characters: + context.new_error("FORBIDDEN_CHAR_NAME", val) + break + context.scope.vars_name.remove(val) return False, 0 diff --git a/norminette/rules/check_in_header.py 
b/norminette/rules/check_in_header.py index dd325af6..e9dc5967 100644 --- a/norminette/rules/check_in_header.py +++ b/norminette/rules/check_in_header.py @@ -1,5 +1,5 @@ -from rules import Rule -from scope import * +from norminette.rules import Rule, Check + allowed_in_header = [ "IsVarDeclaration", @@ -19,38 +19,35 @@ "IsFuncPrototype", ] -class CheckInHeader(Rule): - def __init__(self): - super().__init__() - self.depends_on = [ - "IsVarDeclaration", - "IsUserDefinedType", - "IsPreprocessorStatement", - "IsEmptyLine", - "IsBlockStart", - "IsBlockEnd", - "IsComment", - "IsEndOfLine", - "IsFuncPrototype", - ] + +class CheckInHeader(Rule, Check): + depends_on = ( + "IsVarDeclaration", + "IsUserDefinedType", + "IsPreprocessorStatement", + "IsEmptyLine", + "IsBlockStart", + "IsBlockEnd", + "IsComment", + "IsEndOfLine", + "IsFuncPrototype", + ) def run(self, context): """ - Each .h file must be protected against double inclusion - Instructions allowed in header files: - - Variable declaration - - User defined types - - Comments - - Function prototypes + Each .h file must be protected against double inclusion + Instructions allowed in header files: + - Variable declaration + - User defined types + - Comments + - Function prototypes """ - if context.filetype != 'h': + if context.file.type != ".h": return False, 0 sc = context.scope - while sc.name != 'GlobalScope': + while sc.name != "GlobalScope": sc = sc.get_outer() if context.history[-1] not in allowed_in_header: context.new_error("FORBIDDEN_IN_HEADER", context.peek_token(0)) return False, 0 - elif context.history[-1] in must_be_within_define and sc.header_protection != 1: - context.new_error("HEADER_PROT_ALL", context.peek_token(0)) - return False, 0 \ No newline at end of file + return False, 0 diff --git a/norminette/rules/check_label.py b/norminette/rules/check_label.py index 69d10a38..a7de4daf 100644 --- a/norminette/rules/check_label.py +++ b/norminette/rules/check_label.py @@ -1,15 +1,21 @@ -from rules import Rule -from scope import * +from norminette.rules import Rule, Check -class CheckLabel(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsLabel"] - +class CheckLabel(Rule, Check): def run(self, context): """ - Goto and labels are forbidden + Goto and labels are forbidden """ - context.new_error("LABEL_FBIDDEN", context.peek_token(0)) - return False, 0 \ No newline at end of file + i = 0 + if context.scope.name not in ("Function", "ControlStructure"): + return False, 0 + i = context.skip_ws(i) + if context.check_token(i, "GOTO"): + context.new_error("GOTO_FBIDDEN", context.peek_token(0)) + return False, 0 + if context.check_token(i, "IDENTIFIER") is False: + return False, 0 + i = context.skip_ws(i + 1) + if context.check_token(i, "COLON"): + context.new_error("LABEL_FBIDDEN", context.peek_token(0)) + return False, 0 diff --git a/norminette/rules/check_line_count.py b/norminette/rules/check_line_count.py index f887a2bd..55418eef 100644 --- a/norminette/rules/check_line_count.py +++ b/norminette/rules/check_line_count.py @@ -1,30 +1,24 @@ -from rules import Rule -from context import GlobalScope +from norminette.context import GlobalScope +from norminette.rules import Rule, Check -class CheckLineCount(Rule): - def __init__(self): - super().__init__() - self.depends_on = [] - +class CheckLineCount(Rule, Check): def run(self, context): """ - Each function can only have 25 lines between its opening and closing brackets + Each function can only have 25 lines between its opening and closing brackets """ - for t in 
context.tokens[:context.tkn_scope]: - if t.type == "NEWLINE": + for t in context.tokens[: context.tkn_scope]: + if t.type == "NEWLINE" or t.type == "ESCAPED_NEWLINE": context.scope.lines += 1 if type(context.scope) is GlobalScope: - if context.get_parent_rule() == "CheckFuncDeclarations" \ - and context.scope.lines > 25: + if context.get_parent_rule() == "CheckFuncDeclarations" and context.scope.lines > 25: context.new_error("TOO_MANY_LINES", context.tokens[context.tkn_scope]) return False, 0 if context.get_parent_rule() == "CheckBrace": - if "LBRACE" in \ - [t.type for t in context.tokens[:context.tkn_scope + 1]]: + if "LBRACE" in [t.type for t in context.tokens[: context.tkn_scope + 1]]: if type(context.scope) is GlobalScope: return False, 0 else: diff --git a/norminette/rules/check_line_indent.py b/norminette/rules/check_line_indent.py index 75479ccf..9f319aa3 100644 --- a/norminette/rules/check_line_indent.py +++ b/norminette/rules/check_line_indent.py @@ -1,21 +1,25 @@ -from rules import Rule -from scope import * +from norminette.rules import Rule, Check +from norminette.scope import GlobalScope - -class CheckLineIndent(Rule): - def __init__(self): - super().__init__() - self.depends_on = [] - +class CheckLineIndent(Rule, Check): def run(self, context): """ - Each new scope (function, control structure, struct/enum type declaration) adds a tab to the general indentation + Each new scope (function, control structure, struct/enum type declaration) adds a tab to the general indentation """ expected = context.scope.indent - if context.history[-1] == "IsEmptyLine" or context.history[-1] == "IsComment" or context.history[-1] == "IsPreprocessorStatement": + if context.history[-1] in [ + "IsEmptyLine", + "IsComment", + "IsPreprocessorStatement", + "IsVariableDeclaration", + ]: return False, 0 - if context.history[-1] != "IsPreprocessorStatement" and type(context.scope) is GlobalScope and context.scope.include_allowed == True: + if ( + context.history[-1] != "IsPreprocessorStatement" + and type(context.scope) is GlobalScope + and context.scope.include_allowed is True + ): context.scope.include_allowed = False got = 0 while context.check_token(got, "TAB"): @@ -24,11 +28,19 @@ def run(self, context): if context.check_token(got, "RBRACE") is True: expected -= 1 else: - hist = context.history[:len(context.history) - 1] + hist = context.history[: len(context.history) - 1] for item in hist[::-1]: - if item == "IsEmptyLine" or item == "IsComment" or item == "IsPreprocessorStatement": + if ( + item == "IsEmptyLine" + or item == "IsComment" + or item == "IsPreprocessorStatement" + ): continue - if item not in ["IsControlStatement", "IsFuncDeclaration", "IsUserDefinedType"]: + if item not in [ + "IsControlStatement", + "IsFuncDeclaration", + "IsUserDefinedType", + ]: break else: expected -= 1 @@ -39,4 +51,4 @@ def run(self, context): elif got > expected: context.new_error("TOO_MANY_TAB", context.peek_token(0)) return False, got - return False, 0 \ No newline at end of file + return False, 0 diff --git a/norminette/rules/check_line_len.py b/norminette/rules/check_line_len.py index 6a69d96e..c10b7f61 100644 --- a/norminette/rules/check_line_len.py +++ b/norminette/rules/check_line_len.py @@ -1,16 +1,16 @@ -from rules import Rule +from norminette.rules import Rule, Check -class CheckLineLen(Rule): - def __init__(self): - super().__init__() - self.depends_on = [] - +class CheckLineLen(Rule, Check): def run(self, context): """ - Lines must not be over 80 characters long + Lines must not be over 80 characters 
long """ - for tkn in context.tokens[:context.tkn_scope]: - if tkn.type == "NEWLINE" and tkn.pos[1] > 81: + i = 0 + line_too_long = {} + for tkn in context.tokens[: context.tkn_scope]: + if tkn.pos[1] > 81 and tkn.pos[0] not in line_too_long: context.new_error("LINE_TOO_LONG", tkn) + line_too_long[tkn.pos[0]] = True + i += 1 return False, 0 diff --git a/norminette/rules/check_many_instructions.py b/norminette/rules/check_many_instructions.py index 0a909d12..a9d4a15b 100644 --- a/norminette/rules/check_many_instructions.py +++ b/norminette/rules/check_many_instructions.py @@ -1,26 +1,32 @@ -from lexer import Token -from rules import Rule -import string +from norminette.rules import Rule, Check -class CheckManyInstructions(Rule): - def __init__(self): - super().__init__() - self.depends_on = [ - "IsAssignation", - "IsBlockEnd", - "IsControlStatement", - "IsExpressionStatement", - "IsFuncDeclaration", - "IsFuncPrototype", - "IsUserDefinedType", - "IsVarDeclaration", - "IsFunctionCall"] +class CheckManyInstructions(Rule, Check): + depends_on = ( + "IsAssignation", + "IsBlockEnd", + "IsControlStatement", + "IsExpressionStatement", + "IsFuncDeclaration", + "IsFuncPrototype", + "IsUserDefinedType", + "IsVarDeclaration", + "IsFunctionCall", + ) def run(self, context): """ - Each instruction must be separated by a newline + Each instruction must be separated by a newline """ if context.peek_token(0).pos[1] > 1: context.new_error("TOO_MANY_INSTR", context.peek_token(0)) + return False, 0 + # if context.history[-1] in ["IsFuncDeclaration", "IsFuncPrototype", "IsControlStatement"]: + # return False, 0 + # i = 0 + # while i < context.tkn_scope: + # if context.check_token(i, SEPARATORS) is True: + # context.new_error("TOO_MANY_INSTR", context.peek_token(0)) + # return False, 0 + # i += 1 return False, 0 diff --git a/norminette/rules/check_nest_line_indent.py b/norminette/rules/check_nest_line_indent.py index d2812633..37ed2f3f 100644 --- a/norminette/rules/check_nest_line_indent.py +++ b/norminette/rules/check_nest_line_indent.py @@ -1,5 +1,5 @@ -from rules import Rule -from scope import * +from norminette.rules import Rule, Check + operators = [ "RIGHT_ASSIGN", @@ -17,7 +17,6 @@ "EQUALS", "NOT_EQUAL", "ASSIGN", - "SEMI_COLON", "DOT", "NOT", "MINUS", @@ -39,44 +38,56 @@ ] nest_kw = ["RPARENTHESIS", "LPARENTHESIS", "NEWLINE"] -class CheckNestLineIndent(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsControlStatement", "IsExpressionStatement"] + +class CheckNestLineIndent(Rule, Check): + depends_on = ( + "IsControlStatement", + "IsExpressionStatement", + "IsDeclaration", + ) def find_nest_content(self, context, nest, i): expected = context.scope.indent + nest while context.peek_token(i) is not None: - if context.check_token(i, "LPARENTHESIS") is True: + if context.check_token(i, ["LPARENTHESIS", "LBRACE", "LBRACKET"]) is True: i += 1 - i = self.find_nest_content(context, nest + 1, i) + i = self.find_nest_content(context, nest + 1, i) + 1 + if context.check_token(i, ["RBRACE", "RBRACKET", "RPARENTHESIS"]): + return i elif context.check_token(i, "NEWLINE") is True: if context.check_token(i - 1, operators): context.new_error("EOL_OPERATOR", context.peek_token(i - 1)) + if context.check_token(i, "SEMI_COLON") is True: + return i indent = 0 i += 1 while context.check_token(i, "TAB") is True: indent += 1 i += 1 + if context.check_token(i, ["RBRACE", "RBRACKET", "RPARENTHESIS"]): + expected -= 1 if indent > expected: context.new_error("TOO_MANY_TAB", context.peek_token(i)) elif indent < 
expected: context.new_error("TOO_FEW_TAB", context.peek_token(i)) - elif context.check_token(i, "RPARENTHESIS"): - return i - i += 1 + if context.check_token(i, ["RBRACE", "RBRACKET", "RPARENTHESIS"]): + expected += 1 + else: + i += 1 return i def run(self, context): """ - Each nest (parenthesis, brackets, braces) adds a tab to the general indentation + Each nest (parenthesis, brackets, braces) adds a tab to the general indentation """ i = 0 - expected = context.scope.indent nest = 0 if context.history[-1] == "IsEmptyLine": return False, 0 - while context.peek_token(i) and context.check_token(i, ["LPARENTHESIS", "NEWLINE"]) is False: + while ( + context.peek_token(i) + and context.check_token(i, ["LPARENTHESIS", "NEWLINE"]) is False + ): i += 1 if context.check_token(i, "NEWLINE") is True: return False, 0 diff --git a/norminette/rules/check_newline_indent.py b/norminette/rules/check_newline_indent.py new file mode 100644 index 00000000..c75f15aa --- /dev/null +++ b/norminette/rules/check_newline_indent.py @@ -0,0 +1,29 @@ +from norminette.rules import Rule, Check + + +class CheckNewlineIndent(Rule, Check): + depends_on = [ + "IsDeclaration", + "IsAssignation", + "IsCast", + "IsExpressionStatement", + ] + + def run(self, context): + """ + If a line has a newline inside, we must check for indent - authorized : same indent/same + 1 indent + """ + if context.scope.name != "Function": + return False, 0 + expected = context.scope.indent + i = context.find_in_scope("NEWLINE", nested=False) + 1 + if i != -1 and i < context.tkn_scope - 2: + start = i + got = 0 + while context.check_token(start + got, "TAB"): + got += 1 + if got > expected + 1: + context.new_error("TOO_MANY_TAB", context.peek_token(start)) + if got < expected: + context.new_error("TOO_FEW_TAB", context.peek_token(start)) + return False, 0 diff --git a/norminette/rules/check_operators_spacing.py b/norminette/rules/check_operators_spacing.py index e4ee7fd0..a87f6849 100644 --- a/norminette/rules/check_operators_spacing.py +++ b/norminette/rules/check_operators_spacing.py @@ -1,4 +1,4 @@ -from rules import Rule +from norminette.rules import Rule, Check operators = [ "RIGHT_ASSIGN", @@ -40,7 +40,7 @@ "BWISE_AND", "RIGHT_SHIFT", "LEFT_SHIFT", - "TERN_CONDITION" + "TERN_CONDITION", ] assign_operators = [ @@ -54,11 +54,10 @@ "AND_ASSIGN", "XOR_ASSIGN", "OR_ASSIGN", - "ASSIGN" + "ASSIGN", ] -gps_operators = [ -] +gps_operators = [] ps_operators = [ # operators that should be prefixed and suffixed by a space @@ -77,9 +76,8 @@ "EQUALS", # == "NOT_EQUAL", # != "ASSIGN", # = - "COLON", # : "DIV", # / - "MULT", # * + "MULT", # * "MODULO", # % "LESS_THAN", # < "MORE_THAN", # > @@ -87,7 +85,7 @@ "OR", # | "RIGHT_SHIFT", # >> "LEFT_SHIFT", # << - "TERN_CONDITION" # ? + "TERN_CONDITION", # ? 
] p_operators = [ @@ -97,7 +95,9 @@ s_operators = [ # operators that should only be suffixed by a space - "COMMA" # , + "COMMA", # , + # Where do i put that shit + # "COLON", # : ] son_operators = [ @@ -112,105 +112,222 @@ "BWISE_XOR", # ^ "BWISE_OR", # | "BWISE_AND", # & - "BWISE_NOT", # ~ + "BWISE_NOT", # ~ ] -rnests = [ - "RPARENTHESIS", - "RBRACE", - "RBRACKET" +glued_operators = ["MULT", "PLUS", "MINUS", "DIV", "NOT", "BWISE_NOT"] + +spec_operators = [ + "NOT", + "BWISE_NOT", + "DIV", ] +rnests = ["RPARENTHESIS", "RBRACE", "RBRACKET"] + lnests = [ "LBRACE", "LBRACKET", "LPARENTHESIS", ] -left_auth = [ -] +left_auth = [] -right_auth = [ -] +right_auth = [] -whitespaces = [ - "NEWLINE", - "SPACE", - "TAB" -] +whitespaces = ["NEWLINE", "SPACE", "TAB"] -class CheckOperatorsSpacing(Rule): - def __init__(self): - super().__init__() - self.depends_on = [ - "IsFuncDeclaration", - "IsFuncPrototype", - "IsExpressionStatement", - "IsAssignation", - "IsControlStatement", - "IsVarDeclaration", - "IsFunctionCall", - "IsDeclaration", - ] - self.last_seen_tkn = None - def check_prefix(self, context, pos): - tmp = -1 +class CheckOperatorsSpacing(Rule, Check): + depends_on = ( + "IsFuncDeclaration", + "IsFuncPrototype", + "IsExpressionStatement", + "IsAssignation", + "IsControlStatement", + "IsVarDeclaration", + "IsFunctionCall", + "IsDeclaration", + ) - if pos > 0 and context.peek_token(pos - 1).type != "SPACE": - context.new_error("SPC_BFR_OPERATOR", context.peek_token(pos)) - if pos + 1 < len(context.tokens[:context.tkn_scope]) \ - and context.peek_token(pos + 1).type == "SPACE": + def check_prefix(self, context, pos): + if pos > 0 and context.check_token(pos, ["TAB", "SPACE"]): + context.new_error("", context.peek_token(pos)) + if ( + pos + 1 < len(context.tokens[: context.tkn_scope]) + and context.peek_token(pos + 1).type == "SPACE" + ): context.new_error("NO_SPC_AFR_OPR", context.peek_token(pos)) def check_lnest(self, context, pos): - if context.history[-1] == "IsFuncDeclaration" or context.history[-1] == "IsFuncPrototype": + if ( + context.history[-1] == "IsFuncDeclaration" + or context.history[-1] == "IsFuncPrototype" + ): return False tmp = pos + 1 - #Here is `(_` - while context.peek_token(tmp) and context.check_token(tmp, ["SPACE", "TAB"]) is True: + # Here is `(_` + while ( + context.peek_token(tmp) + and context.check_token(tmp, ["SPACE", "TAB"]) is True + ): tmp += 1 if context.check_token(tmp, "NEWLINE") is False: - if context.check_token(tmp, lnests + rnests + ["SEMI_COLON", "PTR", "DOT"]) is True and tmp != pos + 1: + if ( + context.check_token(tmp, lnests + rnests + ["SEMI_COLON", "PTR", "DOT"]) + is True + and tmp != pos + 1 + ): context.new_error("SPC_AFTER_PAR", context.peek_token(pos)) - elif context.check_token(tmp, lnests + rnests + ["SEMI_COLON", "PTR", "DOT"]) is False and tmp != pos + 1: + elif ( + context.check_token(tmp, lnests + rnests + ["SEMI_COLON", "PTR", "DOT"]) + is False + and tmp != pos + 1 + ): context.new_error("NO_SPC_AFR_PAR", context.peek_token(pos)) tmp = pos - 1 - #Here is `_(` + # Here is `_(` while tmp >= 0 and context.check_token(tmp, ["SPACE", "TAB"]) is True: tmp -= 1 if context.check_token(tmp, "NEWLINE") is False: - if context.check_token(tmp, lnests + rnests + ["SEMI_COLON", "PTR", "DOT", "INC", "DEC", "MULT", "BWISE_AND", "IDENTIFIER", "SIZEOF"]) is True and tmp != pos - 1: - if context.check_token(tmp, ["MULT", "BWISE_AND"]) is True and context.is_operator == False: + if ( + context.check_token( + tmp, + lnests + + rnests + + [ + "SEMI_COLON", + 
"PTR", + "DOT", + "INC", + "DEC", + "MULT", + "BWISE_AND", + "IDENTIFIER", + "SIZEOF", + ], + ) + is True + and tmp != pos - 1 + ): + if ( + context.check_token(tmp, ["MULT", "BWISE_AND"]) is True + and context.is_operator is False + ): context.new_error("NO_SPC_BFR_PAR", context.peek_token(pos)) - elif context.check_token(tmp, lnests + rnests + ["SEMI_COLON", "PTR", "DOT", "INC", "DEC", "MULT", "BWISE_AND", "BWISE_OR", "BWISE_XOR", "BWISE_NOT", "IDENTIFIER", "SIZEOF", "NOT", "MINUS", "PLUS"]) is False and tmp == pos - 1: + elif ( + context.check_token( + tmp, + lnests + + rnests + + [ + "SEMI_COLON", + "PTR", + "DOT", + "INC", + "DEC", + "MULT", + "BWISE_AND", + "BWISE_OR", + "BWISE_XOR", + "BWISE_NOT", + "IDENTIFIER", + "SIZEOF", + "NOT", + "MINUS", + "PLUS", + "CONSTANT", + "CHAR_CONSTANT", + "STRING", + ], + ) + is False + and tmp == pos - 1 + ): context.new_error("SPC_BFR_PAR", context.peek_token(pos)) return False def check_rnest(self, context, pos): - if context.history[-1] == "IsFuncDeclaration" or context.history[-1] == "IsFuncPrototype": + if ( + context.history[-1] == "IsFuncDeclaration" + or context.history[-1] == "IsFuncPrototype" + ): return False tmp = pos + 1 - #Here is `)_` - while context.peek_token(tmp) and context.check_token(tmp, ["SPACE", "TAB"]) is True: + # Here is `)_` + while ( + context.peek_token(tmp) + and context.check_token(tmp, ["SPACE", "TAB"]) is True + ): tmp += 1 if context.check_token(tmp, "NEWLINE") is False: - if context.check_token(tmp, lnests + rnests + ["SEMI_COLON", "PTR", "DOT", "INC", "DEC"]) is True and tmp != pos + 1: + if ( + context.check_token( + tmp, lnests + rnests + ["SEMI_COLON", "PTR", "DOT", "INC", "DEC"] + ) + is True + and tmp != pos + 1 + ): context.new_error("NO_SPC_AFR_PAR", context.peek_token(pos)) - elif context.check_token(tmp, lnests + rnests + ["SEMI_COLON", "PTR", "DOT", "INC", "DEC", "MULT", "BWISE_AND", "IDENTIFIER", "COMMA"]) is False and tmp == pos + 1: + elif ( + context.check_token( + tmp, + lnests + + rnests + + [ + "SEMI_COLON", + "PTR", + "DOT", + "INC", + "DEC", + "MINUS", + "MULT", + "BWISE_AND", + "IDENTIFIER", + "COMMA", + "STRING", + "CONSTANT", + "PLUS", + ], + ) + is False + and tmp == pos + 1 + ): context.new_error("SPC_AFTER_PAR", context.peek_token(pos)) tmp = pos - 1 - #Here is `_)` + # Here is `_)` while tmp > 0 and context.check_token(tmp, ["SPACE", "TAB"]) is True: tmp -= 1 if context.check_token(tmp, "NEWLINE") is False: - if context.check_token(tmp, lnests + rnests + ["SEMI_COLON", "PTR", "DOT", "INC", "DEC", "MULT", "BWISE_AND", "IDENTIFIER"]) is True and tmp != pos - 1: + if ( + context.check_token( + tmp, + lnests + + rnests + + [ + "SEMI_COLON", + "PTR", + "DOT", + "INC", + "DEC", + "MULT", + "BWISE_AND", + "IDENTIFIER", + "CONSTANT", + ], + ) + is True + and tmp != pos - 1 + ): context.new_error("NO_SPC_BFR_PAR", context.peek_token(pos)) return False def check_suffix(self, context, pos): - if pos + 1 < len(context.tokens[:context.tkn_scope]) \ - and not context.check_token(pos + 1, ["SPACE", "NEWLINE", "TAB"]): + if pos + 1 < len( + context.tokens[: context.tkn_scope] + ) and not context.check_token( + pos + 1, ["SPACE", "NEWLINE", "TAB"] + glued_operators + rnests + ): context.new_error("SPC_AFTER_OPERATOR", context.peek_token(pos)) if pos > 0 and context.peek_token(pos - 1).type == "SPACE": context.new_error("NO_SPC_BFR_OPR", context.peek_token(pos)) @@ -221,48 +338,121 @@ def check_glued_prefix_and_suffix(self, context, pos): tmp = -1 while context.check_token(pos + tmp, "TAB") is True: tmp 
-= 1 - if context.check_token(pos + tmp, "NEWLINE") is True: + if ( + context.check_token( + pos + tmp, ["NEWLINE", "ESCAPED_NEWLINE"] + glued_operators + ) + is True + ): return False, 0 context.new_error("SPC_BFR_OPERATOR", context.peek_token(pos)) - if pos + 1 < len(context.tokens[:context.tkn_scope]) and context.check_token(pos + 1, ["SPACE", "LPARENTHESIS", "LBRACKET", "LBRACE", "NEWLINE"]) is False: + if ( + pos + 1 < len(context.tokens[: context.tkn_scope]) + and context.check_token( + pos + 1, + ["SPACE", "LPARENTHESIS", "LBRACKET", "LBRACE", "NEWLINE"] + + glued_operators, + ) + is False + ): context.new_error("SPC_AFTER_OPERATOR", context.peek_token(pos)) def check_prefix_and_suffix(self, context, pos): - if pos > 0 and context.check_token(pos - 1, ['SPACE', 'LPARENTHESIS', 'RPARENTHESIS', "LBRACKET", 'RBRACKET']) is False: + if ( + pos > 0 + and context.check_token( + pos - 1, ["SPACE", "LPARENTHESIS", "LBRACKET"] + glued_operators + ) + is False + ): if context.check_token(pos - 1, "TAB") is True: tmp = -1 while context.check_token(pos + tmp, "TAB") is True: tmp -= 1 - if context.check_token(pos + tmp, "NEWLINE") is True: + if ( + context.check_token(pos + tmp, ["NEWLINE", "ESCAPED_NEWLINE"]) + is True + ): return False, 0 + if ( + context.check_token(pos - 1, "RPARENTHESIS") + and context.parenthesis_contain(context.skip_nest_reverse(pos - 1))[0] + == "cast" + ): + return False, 0 context.new_error("SPC_BFR_OPERATOR", context.peek_token(pos)) - if pos + 1 < len(context.tokens[:context.tkn_scope]) \ - and context.check_token(pos + 1, ['SPACE', 'LPARENTHESIS', 'RPARENTHESIS', "LBRACKET", 'RBRACKET']) is False: - context.new_error("SPC_AFTER_OPERATOR", context.peek_token(pos)) + if ( + pos + 1 < len(context.tokens[: context.tkn_scope]) + and context.check_token( + pos + 1, + [ + "SPACE", + "LPARENTHESIS", + "RPARENTHESIS", + "LBRACKET", + "RBRACKET", + "NEWLINE", + "COMMA", + ] + + spec_operators, + ) + is False + ): + tmp = pos - 1 + while context.check_token(tmp, ["SPACE", "TAB"]): + tmp -= 1 + if context.check_token(tmp, "RPARENTHESIS"): + tmp = context.skip_nest_reverse(tmp) + if context.parenthesis_contain(tmp)[0] != "cast": + context.new_error("SPC_AFTER_OPERATOR", context.peek_token(pos)) + elif context.check_token(tmp, glued_operators) is False and not ( + context.check_token(pos, ["PLUS", "MINUS"]) + and context.check_token(pos + 1, "CONSTANT") + ): + context.new_error("SPC_AFTER_OPERATOR", context.peek_token(pos)) def check_glued_operator(self, context, pos): glued = [ - 'LPARENTHESIS', - 'LBRACKET', - 'LBRACE', + "LPARENTHESIS", + "LBRACKET", + "LBRACE", ] if context.check_token(pos + 1, ["SPACE", "TAB"]) is True: context.new_error("SPC_AFTER_OPERATOR", context.peek_token(pos)) pos -= 1 - if context.check_token(pos, glued + ['SPACE', 'TAB']) is False: - context.new_error("SPC_BFR_OPERATOR", context.peek_token(pos)) - while pos >= 0 and context.check_token(pos, ['SPACE', 'TAB']) is True: + if ( + context.check_token(pos, glued + ["SPACE", "TAB"] + glued_operators) + is False + ): + context.new_error("SPC_BFR_OPERATOR", context.peek_token(pos)) + while pos >= 0 and context.check_token(pos, ["SPACE", "TAB"]) is True: pos -= 1 if pos >= 0 and context.check_token(pos, glued) is True: context.new_error("NO_SPC_BFR_OPR", context.peek_token(pos)) - def check_combined_op(self, context, pos): - lpointer = ["SPACE", "TAB", "LPARENTHESIS", "LBRACKET", "MULT", "NOT", "RPARENTHESIS", "RBRACKET", "RBRACE"] - lsign = operators + ["LBRACKET"] + lpointer = [ + "SPACE", + "TAB", + 
"LPARENTHESIS", + "LBRACKET", + "MULT", + "NOT", + "RPARENTHESIS", + "RBRACKET", + "RBRACE", + "MINUS", + "PLUS", + "BWISE_NOT", + "BWISE_OR", + "BWISE_AND", + "BWISE_XOR", + ] i = 0 if context.peek_token(pos).type == "MULT": - if context.check_token(pos - 1, lpointer) == False and context.is_glued_operator(pos - 1) is True: + if context.check_token(pos - 1, lpointer) is False and ( + context.is_glued_operator(pos - 1) is True + ): # or context.check_token(pos - 1, c_operators) is False): context.new_error("SPC_BFR_POINTER", context.peek_token(pos)) if context.check_token(pos + 1, ["SPACE", "TAB"]): context.new_error("SPC_AFTER_POINTER", context.peek_token(pos)) @@ -271,23 +461,22 @@ def check_combined_op(self, context, pos): i += 1 if context.peek_token(pos + i).type == "SPACE": context.new_error("SPC_AFTER_POINTER", context.peek_token(pos + i)) - return (i) + return i def run(self, context): """ - Some operators must be followed by a space, - some must be only followed by a space, - and the rest must be preceded and followed by a space. + Some operators must be followed by a space, + some must be only followed by a space, + and the rest must be preceded and followed by a space. """ - self.last_seen_tkn = None i = 0 - while i < len(context.tokens[:context.tkn_scope]): + while i < len(context.tokens[: context.tkn_scope]): if context.check_token(i, ["MULT", "BWISE_AND"]) is True: if context.is_operator(i) is False: self.check_combined_op(context, i) i += 1 continue - if context.check_token(i, c_operators)is True: + if context.check_token(i, c_operators) is True: if context.is_glued_operator(i) is True: self.check_glued_operator(context, i) else: @@ -298,18 +487,18 @@ def run(self, context): self.check_lnest(context, i) elif context.check_token(i, rnests) is True: self.check_rnest(context, i) - elif context.check_token(i, ps_operators)is True: + elif context.check_token(i, ps_operators) is True: self.check_prefix_and_suffix(context, i) - elif context.check_token(i, gps_operators)is True: + elif context.check_token(i, gps_operators) is True: self.check_glued_prefix_and_suffix(context, i) - elif context.check_token(i, s_operators)is True: + elif context.check_token(i, s_operators) is True: self.check_suffix(context, i) - elif context.check_token(i, son_operators) is True and \ - context.check_token(i + 1, "NEWLINE") is False: + elif ( + context.check_token(i, son_operators) is True + and context.check_token(i + 1, "NEWLINE") is False + ): self.check_suffix(context, i) - elif context.check_token(i, p_operators)is True: + elif context.check_token(i, p_operators) is True: self.check_prefix(context, i) - if context.check_token(i, whitespaces) is False: - self.last_seen_tkn = context.peek_token(i) i += 1 return False, 0 diff --git a/norminette/rules/check_preprocessor_define.py b/norminette/rules/check_preprocessor_define.py index 4c47e5fe..66624244 100644 --- a/norminette/rules/check_preprocessor_define.py +++ b/norminette/rules/check_preprocessor_define.py @@ -1,98 +1,54 @@ -from rules import Rule -from lexer import Lexer, TokenError -from scope import * +from norminette.rules import Rule, Check -class CheckPreprocessorDefine(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsPreprocessorStatement"] - - def skip_define_nest(self, i, tkns): - eq = { - 'LPARENTHESIS': 'RPARENTHESIS', - 'LBRACKET': 'RBRACKET', - 'LBRACE': 'RBRACE' - } - eq_val = eq[tkns[i].type] - while i < len(tkns) and tkns[i].type != eq_val: - i += 1 - return i - - def check_function_declaration(self, 
context): - i = context.skip_ws(0) - if context.check_token(i, ['#IF', "#ELSE", "IFDEF", "IFNDEF"]) is False: - return - context.tmp_scope = context.scope - context.scope = context.scope.get_outer() +class CheckPreprocessorDefine(Rule, Check): + depends_on = ( + "IsPreprocessorStatement", + ) def run(self, context): """ - Preprocessor statements must be defined only in the global scope - Defined names must be in capital letters - Define cannot contain newlines - Define can only contain constant values, such as integers and strings + Defined names must be in capital letters + Define can only contain constant values, such as integers and strings """ i = context.skip_ws(0) - if len(context.history) > 1 and context.history[-2] == "IsFuncDeclaration": - self.check_function_declaration(context) - if type(context.scope) is not GlobalScope: - if type(context.scope) == Function and context.scope.multiline == False: - pass - else: - context.new_error("PREPROC_GLOBAL", context.peek_token(0)) - if context.check_token(i, "DEFINE") is False: - return False, 0 - val = context.peek_token(i).value.split("define", 1)[1] - content = Lexer(val, context.peek_token(i).pos[0]) - tkns = content.get_tokens() - i = 0 - identifiers = [] - protection = context.filename.upper().split('/')[-1].replace('.', '_') - for tkn in tkns: - if tkn.type == "ESCAPED_NEWLINE": - context.new_error("NEWLINE_DEFINE", tkn) - elif tkn.type in ["TAB", "SPACE"]: - i += 1 - continue - elif tkn.type == "IDENTIFIER" and len(identifiers) == 0: - if tkn.value.isupper() is False: - context.new_error("MACRO_NAME_CAPITAL", tkn) - identifiers.append(tkn) - tmp = i - while tmp < len(tkns) - 1 and tkns[tmp].type in ["SPACE", "TAB", "IDENTIFIER"]: - tmp += 1 - if tmp == (len(tkns) - 1) and context.filetype == 'h': - if context.scope.header_protection == 0: - if identifiers[0].value == protection: - context.scope.header_protection = 1 - elif identifiers[0].value != protection: - context.new_error("HEADER_PROT_NAME", tkns[1]) - elif context.filetype == 'c' and context.scope.include_allowed == True and \ - (len(tkns) > tmp + 1 or (len(tkns) == tmp + 1 and identifiers[0].value != protection \ - and context.scope.header_protection == -1 )): - context.scope.include_allowed = False + i += 1 # skip HASH + i = context.skip_ws(i) + if not context.check_token(i, "IDENTIFIER"): + return + if not context.peek_token(i).value == "define": + return + if context.preproc.skip_define: + return + i += 1 # skip DEFINE + i = context.skip_ws(i) + + if not context.peek_token(i).value.isupper(): + context.new_error("MACRO_NAME_CAPITAL", context.peek_token(i)) + i += 1 # skip macro name - elif tkn.type in ["IDENTIFIER", "STRING", "CONSTANT"]: - if len(identifiers) == 1: - if tkn.type == "IDENTIFIER" and tkn.value.isupper() is False: - context.new_error("PREPROC_CONSTANT", tkn) - identifiers.append(tkn) - elif len(identifiers) == 0: - context.new_error("INCORRECT_DEFINE", tkn) - else: - context.new_error("TOO_MANY_VALS", tkn) - elif tkn.type == "LPARENTHESIS": - if len(identifiers) == 0: - continue - elif len(identifiers) == 1 and tkns[i - 1].type in ["SPACE", "TAB"]: - continue - else: - context.new_error("PREPROC_CONSTANT", tkn) - elif tkn.type in ["LBRACKET", "LBRACE"]: - context.new_error("PREPROC_CONSTANT", tkn) + if context.check_token(i, "LPARENTHESIS"): + context.new_error("MACRO_FUNC_FORBIDDEN", context.peek_token(i)) + while not context.check_token(i, "RPARENTHESIS"): + i += 1 + i += 1 + i = context.skip_ws(i) + # It is obscure what `#define` can hold in its value, 
see: + # - https://github.com/42School/norminette/issues/12 + # - https://github.com/42School/norminette/issues/127 + # - https://github.com/42School/norminette/issues/282 + # + if context.check_token(i, ("MINUS", "PLUS", "BWISE_NOT")): i += 1 - if context.filetype == 'h' and context.scope.header_protection != 1: - context.new_error("HEADER_PROT_ALL", context.peek_token(0)) - return False, 0 + i = context.skip_ws(i) + if not context.check_token(i, ("CONSTANT", "IDENTIFIER")): + context.new_error("PREPROC_CONSTANT", context.peek_token(i)) + return + i += 1 + elif context.check_token(i, ("CONSTANT", "IDENTIFIER", "STRING", "CHAR_CONST")): + i += 1 + + i = context.skip_ws(i, comment=True) + if context.peek_token(i) and not context.check_token(i, "NEWLINE"): + context.new_error("PREPROC_CONSTANT", context.peek_token(i)) diff --git a/norminette/rules/check_preprocessor_include.py b/norminette/rules/check_preprocessor_include.py index 1778fae3..9b4eabee 100644 --- a/norminette/rules/check_preprocessor_include.py +++ b/norminette/rules/check_preprocessor_include.py @@ -1,44 +1,59 @@ -from rules import Rule -from lexer import Lexer, TokenError -from scope import * +import os.path +import itertools +from norminette.rules import Rule, Check -class CheckPreprocessorInclude(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsPreprocessorStatement"] + +class CheckPreprocessorInclude(Rule, Check): + depends_on = ( + "IsPreprocessorStatement", + ) def run(self, context): """ - Includes must be at the start of the file - You cannot include anything that isn't an header file + Includes must be at the start of the file + You cannot include anything that isn't an header file """ - i = 0 - filetype = '' - if context.check_token(i, "INCLUDE") is False: + i = hash_index = context.skip_ws(0) + i += 1 # skip HASH + i = context.skip_ws(i) + if context.check_token(i, "IDENTIFIER") is False: + return False, 0 + if context.peek_token(i).value != "include": return False, 0 - if type(context.scope) is not GlobalScope or context.scope.include_allowed == False: - context.new_error("INCLUDE_START_FILE", context.peek_token(i)) - return True, i - val = context.peek_token(i).value.split("include", 1)[1] - content = Lexer(val, context.peek_token(i).pos[0]) - tkns = content.get_tokens() - i = 1 - while i < len(tkns) and tkns[i].type in ["TAB", "SPACE"]: - i += 1 - if i < len(tkns) and tkns[i].type == "LESS_THAN": - i = len(tkns) - 1 - while i > 0: - if i < len(tkns) - 1 and tkns[i].type == "DOT": - i += 1 - filetype = tkns[i].value - break - i -= 1 - elif i < len(tkns) and tkns[i].type == "STRING": - try: - filetype = tkns[i].value.split('.')[-1][0] - except: - filetype = '' - if filetype and filetype != 'h': - context.new_error("INCLUDE_HEADER_ONLY", context.peek_token(0)) - return False, 0 + if not self.is_in_start_of_file(context): + context.new_error("INCLUDE_START_FILE", context.peek_token(hash_index)) + + i += 1 # skip INCLUDE + i = context.skip_ws(i) + if context.check_token(i, "STRING"): # "niumxp.h" + file = context.peek_token(i).value.strip().strip('"') + file, extension = os.path.splitext(file) + if extension != ".h": + context.new_error("INCLUDE_HEADER_ONLY", context.peek_token(i)) + else: # + less = context.peek_token(i) + while not context.check_token(i, "MORE_THAN"): + i += 1 + last = context.peek_token(i - 1) + prev = context.peek_token(i - 2) + if last.type != "IDENTIFIER" or not (last.value == "h" and prev.type == "DOT"): + context.new_error("INCLUDE_HEADER_ONLY", less) + + i += 1 # skip 
MORE_THAN or STRING + + return True, i + + def is_in_start_of_file(self, context): + """Check if the include is at the start of the file + """ + headers = ( + "IsComment", + "IsEmptyLine", + "IsPreprocessorStatement", + ) + history = itertools.filterfalse(lambda item: item in headers, context.history) + return ( + context.scope.include_allowed + and next(history, None) is None + ) diff --git a/norminette/rules/check_preprocessor_indent.py b/norminette/rules/check_preprocessor_indent.py index 07b834fe..1d5e766f 100644 --- a/norminette/rules/check_preprocessor_indent.py +++ b/norminette/rules/check_preprocessor_indent.py @@ -1,51 +1,93 @@ -from rules import Rule +from norminette.rules import Rule, Check +from norminette.scope import GlobalScope -ALLOWED_PREPROC = ["DEFINE", "IFNDEF", "IFDEF", "#IF", "ELIF", "#ELSE", "ENDIF", "INCLUDE"] -TOO_MUCH_INDENT = ["IFNDEF", "IFDEF", "ELIF", "#IF", "#ELSE"] +ARGUMENTED_PREPROCESSORS = ( + "include", + "import", + "if", # just in case + None, # "if" is a special case ... + "ifdef", + "ifndef", + "elif", + "error", + "pragma", + "undef", + "define", +) -class CheckPreprocessorIndent(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsPreprocessorStatement"] - - def get_space_number(self, val): - val = val[1:] - spaces = 0 - for i in val: - if i == ' ': - spaces += 1 - else: - return spaces +class CheckPreprocessorIndent(Rule, Check): + depends_on = ( + "IsPreprocessorStatement", + ) def run(self, context): """ - Preprocessor statements must be indented by an additionnal space for each #ifdef/#ifndef/#if - statement. - Structure is `#{indentation}preproc_statement` - Preprocessor must always be at the start of the line + Preprocessor statements must be indented by an additionnal space for each #ifdef/#ifndef/#if + statement. 
+ Structure is `#{indentation}preproc_statement` + Preprocessor must always be at the start of the line + Argumented preprocessor statements must have a space between the identifier and the argument """ - i = 0 - i = context.skip_ws(i) - tken = context.peek_token(i) - current_indent = context.preproc_scope_indent - if context.peek_token(i).pos[1] != 1: - context.new_error("PREPROC_START_LINE", context.peek_token(0)) - tken = context.peek_token(i) - if context.check_token(i, ALLOWED_PREPROC) is False: - context.new_error("PREPROC_UKN_STATEMENT", context.peek_token(i)) - if context.check_token(i, TOO_MUCH_INDENT) is True: - current_indent -= 1 - if current_indent < 0: - current_indent = 0 - fmt = '' - val = tken.value[1:] if tken.value else tken.type - spaces = self.get_space_number(tken.value if tken.value else tken.type) - if current_indent != spaces: - context.new_error("PREPROC_BAD_INDENT", context.peek_token(i)) + i = context.skip_ws(0) + hash_ = context.peek_token(i) + if hash_ and hash_.line_column != 1: + context.new_error("PREPROC_START_LINE", hash_) + if not isinstance(context.scope, GlobalScope): + context.new_error("PREPOC_ONLY_GLOBAL", hash_) i += 1 - tken = context.peek_token(i) - if tken is not None and tken.type not in ["NEWLINE", "COMMENT", "MULT_COMMENT"]: - context.new_error("PREPROC_EXPECTED_EOL", context.peek_token(i)) - return False, 0 + + # Empty preprocessor statement (only #) + k = context.skip_ws(i, comment=True) + if context.check_token(k, "NEWLINE"): + return + + n = context.skip_ws(i) + while context.check_token(i, "SPACE"): + i += 1 + if context.check_token(i, "TAB"): + context.new_error("TAB_REPLACE_SPACE", context.peek_token(i)) + i = n + + # Check indentation + spaces = context.peek_token(i).line_column - hash_.line_column - 1 + indent = context.preproc.indent + if context.check_token(i, ("IF", "ELSE")): + indent -= 1 + else: + t = context.peek_token(i) + if t and t.type == "IDENTIFIER" and t.value.upper() in ("IFNDEF", "IFDEF", "ELIF"): + indent -= 1 + indent = max(0, indent) + if spaces > indent: + context.new_error("TOO_MANY_WS", hash_) + if spaces < indent: + context.new_error("PREPROC_BAD_INDENT", hash_) + + # Check spacing after preproc identifier + if ( + context.check_token(i, ("IDENTIFIER", "IF")) + and context.peek_token(i).value in ARGUMENTED_PREPROCESSORS + ): + i += 1 + # BUG: #error/warning with a "comment" (`#error // Hello`) will be + # ignored, but it's not a big deal. + n = context.skip_ws(i, comment=True) + if context.check_token(n, "NEWLINE"): + return + # The idea is to avoid: + # - `#include"libft.h"` (no space) + # - `#include "libft.h"` (tab) + # - `#include "libft.h"` (two spaces) + # Note that only `#include "libft.h"` is valid and we also check + # for `ifdef`, `ifndef`, etc. 
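The comment above pins down the accepted spacing: an argumented directive keyword must be followed by exactly one plain space, never a tab or a run of spaces, before its argument. A self-contained restatement of just that test on a raw source line, reusing the keyword list from ARGUMENTED_PREPROCESSORS (the helper name and the string-based approach are illustrative; the rule itself works on the token stream):

```python
import re

ARGUMENTED = ("include", "import", "if", "ifdef", "ifndef",
              "elif", "error", "pragma", "undef", "define")


def argument_spacing_ok(line):
    """True when the gap between the directive keyword and its argument
    is exactly one space (illustrative helper, not part of the rule)."""
    m = re.match(r"#\s*([a-z]+)([ \t]*)\S", line)
    if m is None or m.group(1) not in ARGUMENTED:
        return True  # no argument, or not a directive this test covers
    return m.group(2) == " "


assert argument_spacing_ok('#include "libft.h"')
assert not argument_spacing_ok('#include"libft.h"')    # no space
assert not argument_spacing_ok('#include\t"libft.h"')  # tab
assert not argument_spacing_ok('#include  "libft.h"')  # two spaces
```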
+ if not context.check_token(i, ("SPACE", "TAB")): + context.new_error("PREPROC_NO_SPACE", context.peek_token(i)) + j = i + while context.check_token(i, "SPACE"): + i += 1 + if context.check_token(i, "TAB"): + context.new_error("TAB_REPLACE_SPACE", context.peek_token(i)) + if context.skip_ws(j) - j > 1: + context.new_error("CONSECUTIVE_WS", context.peek_token(j)) diff --git a/norminette/rules/check_preprocessor_protection.py b/norminette/rules/check_preprocessor_protection.py index 5e9ba6a9..930c3cc3 100644 --- a/norminette/rules/check_preprocessor_protection.py +++ b/norminette/rules/check_preprocessor_protection.py @@ -1,43 +1,69 @@ -from rules import Rule -from lexer import Lexer, TokenError -from scope import * +import itertools +from norminette.rules import Rule, Check -class CheckPreprocessorProtection(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsPreprocessorStatement"] + +class CheckPreprocessorProtection(Rule, Check): + depends_on = ( + "IsPreprocessorStatement", + ) def run(self, context): """ - Header protection must be as follows: - ``` - #ifndef __FILENAME_H__ - # define __FILENAME_H__ - #endif - ``` - Any header instruction must be within the header protection + Header protection must be as follows: + ```c + #ifndef __FILENAME_H__ + # define __FILENAME_H__ + #endif + ``` + Any header instruction must be within the header protection """ - i = 0 - if type(context.scope) is not GlobalScope: + if context.file.type != ".h": + return False, 0 + i = context.skip_ws(0) + hash = context.peek_token(i) + i += 1 # Skip the HASH + i = context.skip_ws(i) + if not context.check_token(i, "IDENTIFIER"): + return False, 0 + # TODO: Add to check if macro definition is bellow #ifndef + t = context.peek_token(i) + if not t or t.type != "IDENTIFIER" or t.value.upper() not in ("IFNDEF", "ENDIF"): + return False, 0 + i += 1 + guard = context.file.basename.upper().replace(".", "_") + if t.value.upper() == "ENDIF": + if context.preproc.indent == 0 and not context.protected: + i = context.skip_ws(i, nl=True, comment=True) + if context.peek_token(i) is not None: + context.new_error("HEADER_PROT_ALL_AF", context.peek_token(i)) + if not context.preproc.has_macro_defined(guard): + context.new_error("HEADER_PROT_NODEF", hash) + context.protected = True return False, 0 - if context.check_token(i, ["IFNDEF", "ENDIF"]) is False or context.filetype != 'h': + if context.preproc.indent != 1: return False, 0 - protection = context.filename.upper().split('/')[-1].replace('.', '_') - val = context.peek_token(i).value.split(' ')[-1] - content = Lexer(val, context.peek_token(i).pos[0]) - tkns = content.get_tokens() - if context.check_token(i, "IFNDEF") is True: - if len(tkns) >= 1 and tkns[0].value == protection and context.scope.header_protection == -1 and context.preproc_scope_indent == 1: - if len(context.history) > 1: - for i in range(len(context.history) - 2, 0, -1): - if context.history[i] != "IsEmptyLine" and context.history[i] != "IsComment": - context.new_error("HEADER_PROT_ALL", context.peek_token(0)) - break - context.scope.header_protection = 0 - elif len(tkns) < 1 or (tkns[0].value != protection and context.scope.header_protection == -1): - context.new_error("HEADER_PROT_NAME", context.peek_token(0)) - elif context.check_token(i, "ENDIF") is True: - if context.scope.header_protection == 1 and context.preproc_scope_indent == 0: - context.scope.header_protection = 2 - return False, 0 \ No newline at end of file + i = context.skip_ws(i) + macro = context.peek_token(i).value + if 
macro != guard and not context.protected: + if macro.upper() == guard: + context.new_error("HEADER_PROT_UPPER", context.peek_token(i)) + else: + context.new_error("HEADER_PROT_NAME", context.peek_token(i)) + + if context.protected: + context.new_error("HEADER_PROT_MULT", hash) + return False, 0 + + headers = ( + "IsComment", + "IsEmptyLine", + ) + history = context.history[:-1] # Remove the current `IsPreprocessorStatement` + history = itertools.filterfalse(lambda item: item in headers, history) + if next(history, None): + # We can't say what line contains the instruction outside + # header protection due to limited history information. + context.new_error("HEADER_PROT_ALL", hash) + + return False, 0 diff --git a/norminette/rules/check_prototype_indent.py b/norminette/rules/check_prototype_indent.py index e97c723c..a65bae8f 100644 --- a/norminette/rules/check_prototype_indent.py +++ b/norminette/rules/check_prototype_indent.py @@ -1,5 +1,7 @@ -from rules import Rule import math + +from norminette.rules import Rule, Check + keywords = [ # C reserved keywords # "AUTO", @@ -33,22 +35,20 @@ "UNSIGNED", "VOID", "VOLATILE", - "WHILE", + "WHILE", "IDENTIFIER", ] -eol = [ - "SEMI_COLON", - "LPARENTHESIS" -] +eol = ["SEMI_COLON", "LPARENTHESIS"] -class CheckPrototypeIndent(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsFuncPrototype"] + +class CheckPrototypeIndent(Rule, Check): + depends_on = ( + "IsFuncPrototype", + ) def run(self, context): """ - All function prototypes names must be aligned on the same indentation + All function prototypes names must be aligned on the same indentation """ i = 0 type_identifier_nb = -1 @@ -56,8 +56,13 @@ def run(self, context): id_length = 0 buffer_len = 0 while context.check_token(i, ["SEMI_COLON"]) is False: + if context.check_token(i, "IDENTIFIER") is True and context.peek_token(i).value == "__attribute__": + i += 1 + i = context.skip_ws(i) + i = context.skip_nest(i) + 1 + type_identifier_nb += 1 if context.check_token(i, "LPARENTHESIS") is True: - if context.parenthesis_contain(i)[0] == 'pointer': + if context.parenthesis_contain(i)[0] == "pointer": i += 1 continue else: @@ -67,7 +72,14 @@ def run(self, context): i += 1 i = 0 while context.check_token(i, eol) is False: - if context.check_token(i, keywords) is True and type_identifier_nb > 0: + if context.check_token(i, "IDENTIFIER") is True and context.peek_token(i).value == "__attribute__": + if type_identifier_nb > 0: + context.new_error("ATTR_EOL", context.peek_token(i)) + i += 1 + i = context.skip_ws(i) + i = context.skip_nest(i) + type_identifier_nb -= 1 + elif context.check_token(i, keywords) is True and type_identifier_nb > 0: type_identifier_nb -= 1 if context.peek_token(i).length == 0: id_length += len(str(context.peek_token(i))) - 2 @@ -91,4 +103,4 @@ def run(self, context): return True, i return False, 0 i += 1 - return False, 0 \ No newline at end of file + return False, 0 diff --git a/norminette/rules/check_spacing.py b/norminette/rules/check_spacing.py index 1630a4de..e11699ff 100644 --- a/norminette/rules/check_spacing.py +++ b/norminette/rules/check_spacing.py @@ -1,24 +1,25 @@ -from rules import Rule +from norminette.rules import Rule, Check -class CheckSpacing(Rule): - def __init__(self): - super().__init__() - self.depends_on = [] - +class CheckSpacing(Rule, Check): def run(self, context): """ - Indentation (except for preprocessors) must be done with tabs - There cannot be trailing spaces or tabs at the end of line + Indentation (except for preprocessors) must be 
done with tabs + There cannot be trailing spaces or tabs at the end of line """ i = 0 - if context.history[-1] == "IsEmptyLine": + if context.history[-1] in ("IsEmptyLine", "IsPreprocessorStatement"): return False, 0 - while i in range(len(context.tokens[:context.tkn_scope])): + space_tab_error = False + space_error = False + while i in range(len(context.tokens[: context.tkn_scope])): if context.check_token(i, "SPACE"): + if context.check_token(i - 1 if i > 0 else 0, "TAB"): + if space_tab_error is False: + context.new_error("MIXED_SPACE_TAB", context.peek_token(i - 1)) + space_tab_error = True if context.peek_token(i).pos[1] == 1: - while i < context.tkn_scope \ - and context.check_token(i, "SPACE"): + while i < context.tkn_scope and context.check_token(i, "SPACE"): i += 1 if context.check_token(i + 1, "NEWLINE"): context.new_error("SPACE_EMPTY_LINE", context.peek_token(i)) @@ -26,17 +27,28 @@ def run(self, context): continue context.new_error("SPACE_REPLACE_TAB", context.peek_token(i)) continue + t = context.skip_ws(i) + if ( + t != i + and context.check_token(t, "NEWLINE") + # CheckBrace already check for spacing, we avoid duplicating error here + and not context.check_token(i-1, ("LBRACE", "RBRACE")) + ): + context.new_error("SPC_BEFORE_NL", context.peek_token(i)) i += 1 if context.check_token(i, "SPACE"): - context.new_error("CONSECUTIVE_SPC", context.peek_token(i - 1)) - while i < context.tkn_scope \ - and context.check_token(i, "SPACE"): + if space_error is False: + context.new_error("CONSECUTIVE_SPC", context.peek_token(i - 1)) + space_error = True + while i < context.tkn_scope and context.check_token(i, "SPACE"): i += 1 - if context.check_token(i, "NEWLINE"): - context.new_error("SPC_BEFORE_NL", context.peek_token(i - 1)) - elif context.check_token(i, ["TAB", "SPACE"]): + if context.check_token(i, "TAB"): + if space_tab_error is False: + context.new_error("MIXED_SPACE_TAB", context.peek_token(i - 1)) + space_tab_error = True + elif context.check_token(i, "TAB"): if context.peek_token(i).pos[1] == 1: - while context.check_token(i, ["TAB", "SPACE"]): + while context.check_token(i, "TAB"): i += 1 if context.check_token(i, "NEWLINE"): context.new_error("SPC_BEFORE_NL", context.peek_token(i - 1)) diff --git a/norminette/rules/check_struct_naming.py b/norminette/rules/check_struct_naming.py index ac5a4f0a..0c976836 100644 --- a/norminette/rules/check_struct_naming.py +++ b/norminette/rules/check_struct_naming.py @@ -1,5 +1,4 @@ -from rules import Rule -from lexer import Lexer, TokenError +from norminette.rules import Rule, Check types = [ "STRUCT", @@ -7,18 +6,17 @@ "UNION", ] -class CheckStructNaming(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsUserDefinedType"] - self.__i = 0 + +class CheckStructNaming(Rule, Check): + depends_on = ( + "IsUserDefinedType", + ) def run(self, context): """ - Rewritten elsewhere + Rewritten elsewhere """ return False, 0 - self.__i += 1 i = 0 i = context.skip_ws(i) while context.check_token(i, types) is False: @@ -28,13 +26,13 @@ def run(self, context): def_type = context.peek_token(i).type i += 1 i = context.skip_ws(i) - if def_type == "STRUCT": + if def_type == "STRUCT": if context.peek_token(i).value.startswith("s_") is False: context.new_error("STRUCT_TYPE_NAMING", context.peek_token(i)) - elif def_type == "ENUM": + elif def_type == "ENUM": if context.peek_token(i).value.startswith("e_") is False: context.new_error("ENUM_TYPE_NAMING", context.peek_token(i)) - elif def_type == "UNION": + elif def_type == "UNION": if 
context.peek_token(i).value.startswith("u_") is False: context.new_error("UNION_TYPE_NAMING", context.peek_token(i)) return False, i diff --git a/norminette/rules/check_ternary.py b/norminette/rules/check_ternary.py index 113b5865..de0c45d1 100644 --- a/norminette/rules/check_ternary.py +++ b/norminette/rules/check_ternary.py @@ -1,15 +1,18 @@ -from lexer import Token -from rules import Rule -import string +from norminette.rules import Rule, Check -class CheckTernary(Rule): - def __init__(self): - super().__init__() - self.depends_on = ['IsTernary'] +class CheckTernary(Rule, Check): def run(self, context): """ - Ternaries are forbidden + Ternaries are forbidden """ - context.new_error("TERNARY_FBIDDEN", context.peek_token(0)) + for i in range(0, context.tkn_scope): + if ( + context.check_token(i, "IDENTIFIER") + and context.peek_token(i).value == "define" + and context.preproc.skip_define + ): + return + if context.check_token(i, "TERN_CONDITION") is True: + context.new_error("TERNARY_FBIDDEN", context.peek_token(i)) return False, 0 diff --git a/norminette/rules/check_utype_declaration.py b/norminette/rules/check_utype_declaration.py index 7bb1a564..3915fd45 100644 --- a/norminette/rules/check_utype_declaration.py +++ b/norminette/rules/check_utype_declaration.py @@ -1,8 +1,5 @@ -from rules import Rule -from lexer import Lexer, TokenError -from scope import * -import math -from exceptions import CParsingError +from norminette.exceptions import CParsingError +from norminette.rules import Rule, Check types = [ "STRUCT", @@ -19,42 +16,49 @@ "STATIC", "IDENTIFIER", "SPACE", - "TAB" + "TAB", ] -utypes = [ - "STRUCT", - "ENUM", - "UNION" -] +utypes = ["STRUCT", "ENUM", "UNION"] -class CheckUtypeDeclaration(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsUserDefinedType"] + +class CheckUtypeDeclaration(Rule, Check): + depends_on = ( + "IsUserDefinedType", + ) def run(self, context): """ - User defined types must respect the following rules: - - Struct names start with s_ - - Enum names start with e_ - - Union names start with u_ - - Typedef names start with t_ + User defined types must respect the following rules: + - Struct names start with s_ + - Enum names start with e_ + - Union names start with u_ + - Typedef names start with t_ """ i = 0 i = context.skip_ws(i) - tkns = context.tokens + token = context.peek_token(i) + if context.scope.name not in ("GlobalScope", "UserDefinedType"): + context.new_error("TYPE_NOT_GLOBAL", token) + if ( + context.file.type == ".c" + and token.type in ("STRUCT", "UNION", "ENUM", "TYPEDEF") + and context.scope not in ("UserDefinedType", "UserDefinedEnum") + ): + context.new_error(f"FORBIDDEN_{token.type}", token) is_td = False on_newline = False utype = None contain_full_def = False ids = [] - while context.check_token(i, ['SEMI_COLON']) is False and i < len(context.tokens): - if context.check_token(i, ['SPACE', 'TAB']): + while context.check_token(i, ["SEMI_COLON"]) is False and i < len( + context.tokens + ): + if context.check_token(i, ["SPACE", "TAB"]): pass if context.check_token(i, ["LPARENTHESIS"]) is True: val, tmp = context.parenthesis_contain(i) - if val == None: + if val is None or val == "cast" or val == "var": i = tmp if context.check_token(i, utypes) is True: utype = context.peek_token(i) @@ -68,22 +72,32 @@ def run(self, context): i = context.skip_ws(i) i = context.skip_nest(i) continue - if context.check_token(i - 1, ["MULT", "BWISE_AND"]) is True: + if ( + context.check_token(i - 1, ["MULT", "BWISE_AND", "LPARENTHESIS"]) 
+ is True + ): tmp = i - 1 - while context.check_token(tmp, ["MULT", "BWISE_AND"]) is True and context.is_operator(tmp) == False: + while ( + context.check_token( + tmp - 1, ["MULT", "BWISE_AND", "LPARENTHESIS"] + ) + is True + and context.is_operator(tmp) is False + ): tmp -= 1 ids.append((context.peek_token(i), tmp)) else: ids.append((context.peek_token(i), i)) - if context.check_token(i, 'LBRACE') is True: + if context.check_token(i, "LBRACE") is True: contain_full_def = True i = context.skip_nest(i) i += 1 check = -1 - if is_td == True and len(ids) < 2 and utype != None: + # print (ids, utype, contain_full_def) + if is_td is True and len(ids) < 2 and utype is not None: context.new_error("MISSING_TYPEDEF_ID", context.peek_token(0)) return False, 0 - if contain_full_def == False and is_td == False and len(ids) > 1: + if contain_full_def is False and is_td is False and len(ids) > 1: check = -2 else: check = -1 @@ -91,8 +105,10 @@ def run(self, context): return False, 0 name = ids[0][0] loc = ids[check][1] - if is_td == True: - if ids[check][0].value.startswith('t_') is False: + if is_td is True: + if not context.check_token(ids[check][1] - 1, ("SPACE", "TAB")): + context.new_error("NO_TAB_BF_TYPEDEF", ids[check][0]) + if ids[check][0].value.startswith("t_") is False: context.new_error("USER_DEFINED_TYPEDEF", context.peek_token(loc)) if utype is not None: if len(ids) > 1: @@ -101,36 +117,71 @@ def run(self, context): if context.debug >= 1: pass elif context.debug == 0: - raise CParsingError(f"{context.filename}: Could not parse structure line {context.peek_token(0).pos[0]}") + raise CParsingError( + f"Error: {context.filename}: Could not parse structure line {context.peek_token(0).pos[0]}" + ) loc = ids[0][1] else: loc = ids[0][1] - if utype is not None and utype.type == "STRUCT" and name.value.startswith('s_') is False: - context.new_error("STRUCT_TYPE_NAMING", context.peek_token(loc)) - if utype is not None and utype.type == "UNION" and name.value.startswith('u_') is False: - context.new_error("UNION_TYPE_NAMING", context.peek_token(loc)) - if utype is not None and utype.type == "ENUM" and name.value.startswith('e_') is False: - context.new_error("ENUM_TYPE_NAMING", context.peek_token(loc)) - if is_td or (is_td == False and contain_full_def == False): + if is_td is False: + if ( + utype is not None + and utype.type == "STRUCT" + and name.value.startswith("s_") is False + ): + context.new_error("STRUCT_TYPE_NAMING", context.peek_token(loc)) + if ( + utype is not None + and utype.type == "UNION" + and name.value.startswith("u_") is False + ): + context.new_error("UNION_TYPE_NAMING", context.peek_token(loc)) + if ( + utype is not None + and utype.type == "ENUM" + and name.value.startswith("e_") is False + ): + context.new_error("ENUM_TYPE_NAMING", context.peek_token(loc)) + if is_td or (is_td is False and contain_full_def is False): tmp = ids[-1][1] - 1 + tabs = 0 while (context.check_token(tmp, "TAB")) is True and tmp > 0: + tabs += 1 tmp -= 1 + # if tabs > 1: + # context.new_error("TOO_MANY_TABS_TD", context.peek_token(tmp)) if context.check_token(tmp, "SPACE") is True: context.new_error("SPACE_REPLACE_TAB", context.peek_token(tmp)) + tab_error = False + can_nl_error = False while tmp > 0: if context.check_token(tmp, "RBRACE") is True: + can_nl_error = True tmp = context.skip_nest_reverse(tmp) - if context.check_token(tmp, "TAB") is True and on_newline == False: - context.new_error("TAB_REPLACE_SPACE", context.peek_token(tmp)) + if context.check_token(tmp, "TAB") is True and on_newline is 
False: + tab_error = True + if ( + context.check_token(tmp, "NEWLINE") is True + and can_nl_error is False + ): + context.new_error("NEWLINE_IN_DECL", context.peek_token(ids[-1][1])) + can_nl_error = True tmp -= 1 - if contain_full_def == False: + if tab_error: + context.new_error("TAB_REPLACE_SPACE", context.peek_token(tmp)) + if contain_full_def is False: i = 0 - identifier = ids[-1][0] i = ids[-1][1] - if context.check_token(i - 1, ["MULT", "BWISE_AND", "LPARENTHESIS"]) is True: + if ( + context.check_token(i - 1, ["MULT", "BWISE_AND", "LPARENTHESIS"]) + is True + ): i -= 1 - while context.check_token(i, ["MULT", "BWISE_AND", "LPARENTHESIS"]) is True \ - and context.is_operator(i) is False: + while ( + context.check_token(i, ["MULT", "BWISE_AND", "LPARENTHESIS"]) + is True + and context.is_operator(i) is False + ): i -= 1 current_indent = context.peek_token(i).pos[1] if context.scope.vars_alignment == 0: @@ -138,4 +189,4 @@ def run(self, context): elif context.scope.vars_alignment != current_indent: context.new_error("MISALIGNED_VAR_DECL", context.peek_token(0)) return True, i - return False, 0 \ No newline at end of file + return False, 0 diff --git a/norminette/rules/check_variable_declaration.py b/norminette/rules/check_variable_declaration.py index eb0c58c4..83120a5e 100644 --- a/norminette/rules/check_variable_declaration.py +++ b/norminette/rules/check_variable_declaration.py @@ -1,5 +1,4 @@ -from rules import Rule -from scope import * +from norminette.rules import Rule, Check assigns = [ "RIGHT_ASSIGN", @@ -15,31 +14,53 @@ "ASSIGN", ] -class CheckVariableDeclaration(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsVarDeclaration"] + +class CheckVariableDeclaration(Rule, Check): + depends_on = ( + "IsVarDeclaration", + ) def run(self, context): """ - Variables can be declared as global or in the scope of a function - Only static variables, global variables, and constants can be initialised at declaration. - Each variable must be declared on a separate line + Variables can be declared as global or in the scope of a function + Only static variables, global variables, and constants can be initialised at declaration. 
+ Each variable must be declared on a separate line """ i = 0 static_or_const = False passed_assign = False if context.scope.name == "Function": - if context.history[-2] != "IsBlockStart" and context.history[-2] != "IsVarDeclaration": + context.scope.vars += 1 + if context.scope.vars > 5: + context.new_error("TOO_MANY_VARS_FUNC", context.peek_token(i)) + if ( + context.history[-2] != "IsBlockStart" + and context.history[-2] != "IsVarDeclaration" + ): context.new_error("VAR_DECL_START_FUNC", context.peek_token(i)) - elif context.scope.vdeclarations_allowed == False: + elif context.scope.vdeclarations_allowed is False: context.new_error("VAR_DECL_START_FUNC", context.peek_token(i)) - elif context.scope.vdeclarations_allowed == None: + elif context.scope.vdeclarations_allowed is None: context.scope.vdeclarations_allowed = True - elif context.scope.name == "GlobalScope" or context.scope.name == "UserDefinedType": + elif ( + context.scope.name == "GlobalScope" + or context.scope.name == "UserDefinedType" + ): pass else: context.new_error("WRONG_SCOPE_VAR", context.peek_token(i)) + tmp = 0 + ret, tmp = context.check_type_specifier(tmp) + tmp = context.skip_ws(tmp) + tmp -= 1 + identifier = False + while context.check_token(tmp, ["SEMI_COLON"] + assigns) is False: + if context.check_token(tmp, "IDENTIFIER"): + identifier = True + tmp += 1 + if identifier is False: + context.new_error("IMPLICIT_VAR_TYPE", context.peek_token(0)) + return False while context.peek_token(i) and context.check_token(i, "SEMI_COLON") is False: if context.check_token(i, "LPARENTHESIS") is True: i = context.skip_nest(i) @@ -47,12 +68,12 @@ def run(self, context): passed_assign = True if context.check_token(i, ["STATIC", "CONST"]) is True: static_or_const = True - if context.check_token(i, assigns) is True and static_or_const == False: + if context.check_token(i, assigns) is True and static_or_const is False: if context.scope.name == "GlobalScope": i += 1 continue context.new_error("DECL_ASSIGN_LINE", context.peek_token(i)) - if context.check_token(i, "COMMA") is True and passed_assign == False: + if context.check_token(i, "COMMA") is True and passed_assign is False: context.new_error("MULT_DECL_LINE", context.peek_token(i)) i += 1 return False, 0 diff --git a/norminette/rules/check_variable_indent.py b/norminette/rules/check_variable_indent.py index 47a22e66..8c78fba5 100644 --- a/norminette/rules/check_variable_indent.py +++ b/norminette/rules/check_variable_indent.py @@ -1,8 +1,8 @@ -from rules import Rule -from scope import * import math import string +from norminette.rules import Rule, Check + keywords = [ # C reserved keywords # @@ -37,8 +37,8 @@ "UNSIGNED", "VOID", "VOLATILE", - "WHILE", - "IDENTIFIER" + "WHILE", + "IDENTIFIER", ] assigns_or_eol = [ "RIGHT_ASSIGN", @@ -54,14 +54,14 @@ "ASSIGN", "SEMI_COLON", "NEWLINE", - "COMMA" + "COMMA", ] -class CheckVariableIndent(Rule): - def __init__(self): - super().__init__() - self.depends_on = ["IsVarDeclaration"] +class CheckVariableIndent(Rule, Check): + depends_on = ( + "IsVarDeclaration", + ) def check_tabs(self, context): i = 0 @@ -74,7 +74,11 @@ def check_tabs(self, context): while context.check_token(i, assigns_or_eol) is False: if context.check_token(i, keywords) is True: type_identifier_nb += 1 - if context.check_token(i, ["LPARENTHESIS", "LBRACE", "LBRACKET"]) and type_identifier_nb > 0: + if ( + context.check_token(i, ["LPARENTHESIS", "LBRACE", "LBRACKET"]) + and type_identifier_nb > 0 + and context.parenthesis_contain(i)[0] != "pointer" + ): i = 
context.skip_nest(i) i += 1 i = 0 @@ -88,7 +92,9 @@ def check_tabs(self, context): elif context.check_token(i, "IDENTIFIER") is True: for c in context.peek_token(i).value: if c in string.ascii_lowercase: - context.new_error("VLA_FORBIDDEN", context.peek_token(i)) + context.new_error( + "VLA_FORBIDDEN", context.peek_token(i) + ) break return True, i i += 1 @@ -110,22 +116,28 @@ def check_tabs(self, context): has_tab += 1 current_indent += 1 type_identifier_nb -= 1 - elif context.check_token(i, "TAB") and type_identifier_nb > 0 and \ - line_start == False: + elif ( + context.check_token(i, "TAB") + and type_identifier_nb > 0 + and line_start is False + ): context.new_error("TAB_REPLACE_SPACE", context.peek_token(i)) i += 1 return False, 0 def run(self, context): """ - Each variable must be indented at the same level for its scope + Each variable must be indented at the same level for its scope """ i = 0 identifier = None - ident = [0,0] + ident = [0, 0] ret = None self.check_tabs(context) - while context.peek_token(i) and context.check_token(i, ["SEMI_COLON", "COMMA", "ASSIGN"]) is False: + while ( + context.peek_token(i) + and context.check_token(i, ["SEMI_COLON", "COMMA", "ASSIGN"]) is False + ): if context.check_token(i, ["LBRACKET", "LBRACE"]) is True: i = context.skip_nest(i) if context.check_token(i, "LPARENTHESIS") is True: @@ -139,8 +151,11 @@ def run(self, context): identifier = ident[0] if context.check_token(i - 1, ["MULT", "BWISE_AND", "LPARENTHESIS"]) is True: i -= 1 - while context.check_token(i - 1, ["MULT", "BWISE_AND", "LPARENTHESIS"]) is True \ - and context.is_operator(i) is False: + while ( + context.check_token(i - 1, ["MULT", "BWISE_AND", "LPARENTHESIS"]) + is True + and context.is_operator(i) is False + ): i -= 1 identifier = context.peek_token(i) if context.scope.vars_alignment == 0: @@ -148,4 +163,4 @@ def run(self, context): elif context.scope.vars_alignment != identifier.pos[1]: context.new_error("MISALIGNED_VAR_DECL", context.peek_token(i)) return True, i - return False, 0 \ No newline at end of file + return False, 0 diff --git a/norminette/rules/is_ambiguous_declaration.py b/norminette/rules/is_ambiguous_declaration.py index db4382ea..24f4f926 100644 --- a/norminette/rules/is_ambiguous_declaration.py +++ b/norminette/rules/is_ambiguous_declaration.py @@ -1,24 +1,17 @@ -from rules import PrimaryRule -from context import ControlStructure, Function, GlobalScope -from exceptions import CParsingError +from norminette.rules import Primary, Rule cs_keywords = ["DO", "WHILE", "FOR", "IF", "ELSE", "SWITCH", "CASE", "DEFAULT"] whitespaces = ["TAB", "SPACE", "NEWLINE"] -class IsAmbiguousDeclaration(PrimaryRule): - def __init__(self): - super().__init__() - self.priority = 0 - self.scope = [] - +class IsAmbiguousDeclaration(Primary, Rule, priority=0): def run(self, context): """ - Catches missing semi-colon or other various missing stuff. Dev feature + Catches missing semi-colon or other various missing stuff. 
Dev feature """ i = context.skip_ws(0, nl=False) while context.peek_token(i) and context.check_token(i, "NEWLINE") is False: if context.check_token(i, ["SEMI_COLON"]) is False: return False, 0 i += 1 - return True, i \ No newline at end of file + return True, i diff --git a/norminette/rules/is_assignation.py b/norminette/rules/is_assignation.py index b8206000..b059cd82 100644 --- a/norminette/rules/is_assignation.py +++ b/norminette/rules/is_assignation.py @@ -1,7 +1,4 @@ -from rules import PrimaryRule -from context import GlobalScope, VariableAssignation -from exceptions import CParsingError - +from norminette.rules import Rule, Primary assign_ops = [ "RIGHT_ASSIGN", @@ -16,7 +13,14 @@ "OR_ASSIGN", "ASSIGN", "INC", - "DEC" + "DEC", +] + +SEPARATORS = [ + "COMMA", + # "AND", + # "OR", + "SEMI_COLON", ] types = [ @@ -37,7 +41,7 @@ "CONST", "REGISTER", "STATIC", - "VOLATILE" + "VOLATILE", ] op = [ @@ -50,61 +54,68 @@ "PLUS", "DIV", "PTR", - "DOT" + "DOT", ] ws = ["SPACE", "TAB", "NEWLINE"] -class IsAssignation(PrimaryRule): - def __init__(self): - super().__init__() - self.primary = True - self.priority = 20 - self.scope = [] - - def skip_ptr(self, context, pos): - i = context.skip_ws(pos) - while context.check_token(i, operators + ws + ["IDENTIFIER"]) is True: - i += 1 - return i - +class IsAssignation(Rule, Primary, priority=20): def check_identifier(self, context, pos): i = pos - while context.check_token(i, types + ws + op + ["IDENTIFIER", "CONSTANT"]): + while context.check_token( + i, types + ws + op + ["IDENTIFIER", "CONSTANT", "INC", "DEC"] + ): if context.check_token(i, "LBRACKET"): - i = context.skip_nest(i) + i = context.skip_nest(i) i += 1 - if "IDENTIFIER" in [t.type for t in context.tokens[:i + 1]]: + if "IDENTIFIER" in [t.type for t in context.tokens[: i + 1]]: return True, i else: return False, 0 + def parse_assign_right_side(self, context, i): + while context.check_token(i, SEPARATORS) is False: + if context.check_token(i, ["LBRACE", "LPARENTHESIS", "LBRACKET"]): + i = context.skip_nest(i) + i += 1 + return i + def run(self, context): """ - Catches all assignation instructions - Requires assign token + Catches all assignation instructions + Requires assign token """ + # pdb.set_trace() ret, i = self.check_identifier(context, 0) if ret is False: return False, 0 - if context.check_token(i, assign_ops) is False: + tmp = 0 + while context.check_token(tmp, assign_ops) is False and tmp <= i: + tmp += 1 + if context.check_token(tmp, assign_ops) is False: return False, 0 - i += 1 + i = tmp + 1 + # i += 1 i = context.skip_ws(i) - #if context.check_token(i, "LBRACE") is True: - #i += 1 - #context.sub = context.scope.inner(VariableAssignation) - #return True, i + # if context.check_token(i, "LBRACE") is True: + # i += 1 + # context.sub = context.scope.inner(VariableAssignation) + # return True, i if context.scope.name == "UserDefinedEnum": - while context.peek_token(i) and (context.check_token(i, ['COMMA', 'SEMI_COLON', 'NEWLINE'])) is False: + while ( + context.peek_token(i) + and (context.check_token(i, ["COMMA", "SEMI_COLON", "NEWLINE"])) + is False + ): i += 1 i = context.eol(i) return True, i - while context.check_token(i, ["SEMI_COLON"]) is False: - i += 1 - if context.peek_token(i) is None: - return False, 0 + i = self.parse_assign_right_side(context, i) + # while context.check_token(i, SEPARATORS) is False: + # i += 1 + # if context.peek_token(i) is None: + # return False, 0 i += 1 i = context.eol(i) return True, i diff --git a/norminette/rules/is_block_end.py 
b/norminette/rules/is_block_end.py index 6e12550a..0a2eb870 100644 --- a/norminette/rules/is_block_end.py +++ b/norminette/rules/is_block_end.py @@ -1,19 +1,11 @@ -from lexer import Token -from rules import PrimaryRule -from context import ( - Function, - UserDefinedType, - VariableAssignation, - ControlStructure, - UserDefinedEnum) +from norminette.context import ControlStructure +from norminette.scope import UserDefinedEnum +from norminette.scope import UserDefinedType +from norminette.scope import VariableAssignation +from norminette.rules import Rule, Primary -class IsBlockEnd(PrimaryRule): - def __init__(self): - super().__init__() - self.priority = 40 - self.scope = [] - +class IsBlockEnd(Rule, Primary, priority=54): def check_udef_typedef(self, context, pos): i = context.skip_ws(pos) if context.check_token(i, "IDENTIFIER") is False: @@ -28,14 +20,14 @@ def check_udef_typedef(self, context, pos): def run(self, context): """ - Catches RBRACE tokens. - Handles scope related stuff: Exiting a scope is done here and in registry.py - Scope is calculated AFTER the rules have run for this primary rule + Catches RBRACE tokens. + Handles scope related stuff: Exiting a scope is done here and in registry.py + Scope is calculated AFTER the rules have run for this primary rule """ i = context.skip_ws(0) if context.check_token(i, "RBRACE") is False: return False, 0 - if type(context.scope) != ControlStructure: + if type(context.scope) is not ControlStructure: context.sub = context.scope.outer() else: context.scope.multiline = False @@ -47,7 +39,7 @@ def run(self, context): ret, i = self.check_udef_typedef(context, i) i = context.eol(i) return ret, i - elif context.check_token(i, 'SEMI_COLON') is True: + elif context.check_token(i, "SEMI_COLON") is True: i += 1 i = context.eol(i) return True, i @@ -59,7 +51,7 @@ def run(self, context): if type(context.scope) is VariableAssignation: i = context.skip_ws(i) if context.check_token(i, "SEMI_COLON"): - #Fatal err? + # Fatal err? 
return False, 0 i += 1 i = context.eol(i) diff --git a/norminette/rules/is_block_start.py b/norminette/rules/is_block_start.py index be8c07ae..b1f795c5 100644 --- a/norminette/rules/is_block_start.py +++ b/norminette/rules/is_block_start.py @@ -1,44 +1,47 @@ -from lexer import Token -from rules import PrimaryRule -from scope import * -from context import ( - Function, - UserDefinedType, - VariableAssignation, - ControlStructure) +from norminette.context import ControlStructure +from norminette.rules import Rule, Primary +from norminette.scope import GlobalScope, UserDefinedEnum, Function, UserDefinedType, VariableAssignation -class IsBlockStart(PrimaryRule): - def __init__(self): - super().__init__() - self.priority = 35 - self.scope = [ - Function, - UserDefinedType, - VariableAssignation, - ControlStructure, - UserDefinedEnum, - GlobalScope] +class IsBlockStart(Rule, Primary, priority=55): + scope = ( + Function, + UserDefinedType, + VariableAssignation, + ControlStructure, + UserDefinedEnum, + GlobalScope, + ) def run(self, context): """ - Catches LBRACE tokens - Creates new scope based on previous instruction or set it to multiline if it - is a control statement + Catches LBRACE tokens + Creates new scope based on previous instruction or set it to multiline if it + is a control statement """ i = context.skip_ws(0, nl=False) if context.check_token(i, "LBRACE") is False: return False, 0 i += 1 - hist = context.history lines = context.scope.lines - for item in hist[::-1]: + for item in reversed(context.history): if item == "IsEmptyLine" or item == "IsComment" or item == "IsPreprocessorStatement": lines -= 1 continue - if item not in ["IsControlStatement", "IsFuncDeclaration", "IsUserDefinedType"] or \ - (item in ["IsControlStatement", "IsFuncDeclaration", "IsUserDefinedType"] and lines >= 1): - context.sub = context.scope.inner(ControlStructure) + if ( + item + not in [ + "IsControlStatement", + "IsFuncDeclaration", + "IsUserDefinedType", + ] + or (item in ["IsControlStatement", "IsFuncDeclaration", "IsUserDefinedType"] and lines >= 1) + ): + scope = { + "IsFuncDeclaration": Function, + "IsUserDefinedType": UserDefinedType, + }.get(item, ControlStructure) + context.sub = context.scope.inner(scope) context.sub.multiline = True break else: @@ -46,7 +49,7 @@ def run(self, context): break tmp = i - #while context.peek_token(tmp) and (context.check_token(tmp, ["NEWLINE"])) is False: + # while context.peek_token(tmp) and (context.check_token(tmp, ["NEWLINE"])) is False: # tmp += 1 tmp = context.eol(tmp) if context.peek_token(tmp) is not None: diff --git a/norminette/rules/is_cast.py b/norminette/rules/is_cast.py index 934dd95e..bcf89e64 100644 --- a/norminette/rules/is_cast.py +++ b/norminette/rules/is_cast.py @@ -1,6 +1,4 @@ -from rules import PrimaryRule -from context import GlobalScope, VariableAssignation -from exceptions import CParsingError +from norminette.rules import Rule, Primary types = [ "CHAR", @@ -20,7 +18,7 @@ "CONST", "REGISTER", "STATIC", - "VOLATILE" + "VOLATILE", ] op = [ @@ -33,22 +31,16 @@ "PLUS", "DIV", "PTR", - "DOT" + "DOT", ] ws = ["SPACE", "TAB", "NEWLINE"] -class IsCast(PrimaryRule): - def __init__(self): - super().__init__() - self.primary = True - self.priority = 15 - self.scope = [] - +class IsCast(Rule, Primary, priority=15): def run(self, context): """ - Catches all casts instructions + Catches all casts instructions """ i = 0 i = context.skip_ws(i, nl=False) @@ -61,4 +53,4 @@ def run(self, context): i = context.skip_ws(i, nl=False) i = context.eol(i) return 
True, i - return False, 0 \ No newline at end of file + return False, 0 diff --git a/norminette/rules/is_comment.py b/norminette/rules/is_comment.py index 3a36d584..ca0d8fe5 100644 --- a/norminette/rules/is_comment.py +++ b/norminette/rules/is_comment.py @@ -1,20 +1,14 @@ -from lexer import Token -from rules import PrimaryRule -from context import GlobalScope, UserDefinedType, ControlStructure, Function +from norminette.rules import Rule, Primary -class IsComment(PrimaryRule): - def __init__(self): - super().__init__() - self.priority = 90 - self.scope = [] - +class IsComment(Rule, Primary, priority=90): def run(self, context): """ - Catches comments tokens + Catches comments tokens """ i = context.skip_ws(0) if context.check_token(i, ["MULT_COMMENT", "COMMENT"]) is True: + self.comment = context.peek_token(i) i += 1 i = context.eol(i) return True, i diff --git a/norminette/rules/is_control_statement.py b/norminette/rules/is_control_statement.py index a85b6f83..927ca098 100644 --- a/norminette/rules/is_control_statement.py +++ b/norminette/rules/is_control_statement.py @@ -1,21 +1,33 @@ -from rules import PrimaryRule -from context import ControlStructure, Function, GlobalScope -from exceptions import CParsingError +from norminette.context import ControlStructure +from norminette.scope import Function +from norminette.context import GlobalScope +from norminette.rules import Rule, Primary -cs_keywords = ["DO", "WHILE", "FOR", "IF", "ELSE", "SWITCH", "CASE", "DEFAULT", "IDENTIFIER"] +cs_keywords = [ + "DO", + "WHILE", + "FOR", + "IF", + "ELSE", + "SWITCH", + "CASE", + "DEFAULT", + "IDENTIFIER", +] whitespaces = ["TAB", "SPACE", "NEWLINE"] -class IsControlStatement(PrimaryRule): - def __init__(self): - super().__init__() - self.priority = 70 - self.scope = [Function, ControlStructure, GlobalScope] +class IsControlStatement(Rule, Primary, priority=65): + scope = ( + Function, + ControlStructure, + GlobalScope, + ) def run(self, context): """ - Catches control statements, including for/switch - Includes the condition, even if over multiple lines + Catches control statements, including for/switch + Includes the condition, even if over multiple lines """ is_id = False id_instead_cs = False @@ -71,7 +83,7 @@ def run(self, context): i = context.eol(i) return True, i i += 1 - if id_instead_cs == True: + if id_instead_cs is True: return False, 0 i = context.skip_ws(i, nl=False) if context.check_token(i, "LPARENTHESIS") is False: @@ -80,7 +92,7 @@ def run(self, context): i += 1 tmp = context.skip_ws(i, nl=True) if context.check_token(tmp, "SEMI_COLON") is True: - if is_id == True: + if is_id is True: return False, 0 tmp += 1 tmp = context.eol(tmp) diff --git a/norminette/rules/is_declaration.py b/norminette/rules/is_declaration.py index 9aabe8b4..f18f600c 100644 --- a/norminette/rules/is_declaration.py +++ b/norminette/rules/is_declaration.py @@ -1,30 +1,27 @@ -from rules import PrimaryRule -from context import ControlStructure, Function -from exceptions import CParsingError +from norminette.rules import Rule, Primary -class IsDeclaration(PrimaryRule): - def __init__(self): - super().__init__() - self.priority = 10 - self.scope = [] +class IsDeclaration(Rule, Primary, priority=5): def run(self, context): - #return False, 0 + # return False, 0 i = context.skip_ws(0, nl=False) p = 0 ident = None - while context.peek_token(i) is not None and context.check_token(i, "SEMI_COLON") is False: + while ( + context.peek_token(i) is not None + and context.check_token(i, "SEMI_COLON") is False + ): if 
context.check_token(i, "LPARENTHESIS"): p += 1 - if context.check_token(i, 'RPARENTHESIS'): + if context.check_token(i, "RPARENTHESIS"): p -= 1 - if context.check_token(i, ['IDENTIFIER', "NULL"]): + if context.check_token(i, ["IDENTIFIER", "NULL"]): ident = context.peek_token(i) i += 1 i += 1 i = context.skip_ws(i, nl=False) if context.check_token(i, "NEWLINE"): i += 1 - if p == 0 and ident != None: + if p == 0 and ident is not None: return True, i - return False, 0 \ No newline at end of file + return False, 0 diff --git a/norminette/rules/is_empty_line.py b/norminette/rules/is_empty_line.py index 8ddf26d7..02d982b7 100644 --- a/norminette/rules/is_empty_line.py +++ b/norminette/rules/is_empty_line.py @@ -1,26 +1,19 @@ -from rules import PrimaryRule -from context import ControlStructure, Function - +from norminette.rules import Rule, Primary cs_keywords = ["DO", "WHILE", "FOR", "IF", "ELSE", "SWITCH"] whitespaces = ["TAB", "SPACE", "NEWLINE"] -class IsEmptyLine(PrimaryRule): - def __init__(self): - super().__init__() - self.priority = 65 - self.scope = [] - +class IsEmptyLine(Rule, Primary, priority=70): def run(self, context): """ - Catches empty line - BUG: Catches end of line token on unrecognized line + Catches empty line + BUG: Catches end of line token on unrecognized line """ i = 0 while context.check_token(i, ["SPACE", "TAB"]) is True: i += 1 - if context.check_token(i, "NEWLINE") is True: + if context.check_token(i, "NEWLINE") is True or context.peek_token(i) is None: i = context.eol(i) return True, i return False, 0 diff --git a/norminette/rules/is_enum_var_decl.py b/norminette/rules/is_enum_var_decl.py index 8b7f0399..720560b2 100644 --- a/norminette/rules/is_enum_var_decl.py +++ b/norminette/rules/is_enum_var_decl.py @@ -1,18 +1,15 @@ -from lexer import Token -from rules import PrimaryRule -from scope import * -from context import GlobalScope, UserDefinedType, ControlStructure, Function, UserDefinedEnum +from norminette.rules import Rule, Primary +from norminette.scope import UserDefinedEnum lbrackets = ["LBRACE", "LPARENTHESIS", "LBRACKET"] rbrackets = ["RBRACE", "RPARENTHESIS", "RBRACKET"] -class IsEnumVarDecl(PrimaryRule): - def __init__(self): - super().__init__() - self.priority = 30 - self.scope = [UserDefinedEnum] +class IsEnumVarDecl(Rule, Primary, priority=30): + scope = ( + UserDefinedEnum, + ) def assignment_right_side(self, context, pos): sep = ["COMMA", "ASSIGN", "NEWLINE"] @@ -24,14 +21,21 @@ def assignment_right_side(self, context, pos): return True, i def var_declaration(self, context, pos): - pclose = ["RPARENTHESIS", "NEWLINE", "SPACE", "TAB"] brackets = 0 parenthesis = 0 braces = 0 i = pos identifier = False - while context.peek_token(i) is not None and context.check_token(i, ["COMMA", "RBRACE", "NEWLINE"]) is False: - if context.check_token(i, "IDENTIFIER") is True and braces == 0 and brackets == 0 and parenthesis == 0: + while ( + context.peek_token(i) is not None + and context.check_token(i, ["COMMA", "RBRACE", "NEWLINE"]) is False + ): + if ( + context.check_token(i, "IDENTIFIER") is True + and braces == 0 + and brackets == 0 + and parenthesis == 0 + ): identifier = True elif context.check_token(i, lbrackets) is True: if context.check_token(i, "LBRACE") is True: @@ -48,18 +52,20 @@ def var_declaration(self, context, pos): if context.check_token(i, "RPARENTHESIS") is True: parenthesis -= 1 elif context.check_token(i, "ASSIGN") is True: - if identifier == False: + if identifier is False: return False, pos ret, i = self.assignment_right_side(context, i + 1) 
i -= 1 if ret is False: return False, pos - elif context.check_token(i, ['SPACE', "TAB", "MULT", "BWISE_AND", "NEWLINE"]): + elif context.check_token( + i, ["SPACE", "TAB", "MULT", "BWISE_AND", "NEWLINE"] + ): pass elif parenthesis == 0 and brackets == 0 and braces == 0: return False, 0 i += 1 - if identifier == False: + if identifier is False: return False, pos if context.check_token(i, ["NEWLINE", "COMMA"]) is True: return True, i @@ -67,7 +73,7 @@ def var_declaration(self, context, pos): def run(self, context): """ - Enum have special var declarations so this catches these specific variables + Enum have special var declarations so this catches these specific variables """ ret, i = self.var_declaration(context, 0) if ret is False: diff --git a/norminette/rules/is_expression_statement.py b/norminette/rules/is_expression_statement.py index 70f9192f..123b0ee0 100644 --- a/norminette/rules/is_expression_statement.py +++ b/norminette/rules/is_expression_statement.py @@ -1,13 +1,9 @@ -from rules import PrimaryRule -from context import Function, ControlStructure -from exceptions import CParsingError +from norminette.context import ControlStructure +from norminette.scope import Function +from norminette.exceptions import CParsingError +from norminette.rules import Rule, Primary -keywords = [ - "BREAK", - "CONTINUE", - "GOTO", - "RETURN" -] +keywords = ["BREAK", "CONTINUE", "GOTO", "RETURN"] operators = [ "MULT", @@ -16,16 +12,17 @@ "LBRACKET", "RBRACKET", "PTR", - "DOT" + "DOT", ] -ws = ["SPACE", "TAB","NEWLINE"] +ws = ["SPACE", "TAB", "NEWLINE"] + -class IsExpressionStatement(PrimaryRule): - def __init__(self): - super().__init__() - self.priority = 25 - self.scope = [Function, ControlStructure] +class IsExpressionStatement(Rule, Primary, priority=25): + scope = ( + Function, + ControlStructure, + ) def check_reserved_keywords(self, context, pos): if context.check_token(pos, keywords) is False: @@ -39,14 +36,19 @@ def check_reserved_keywords(self, context, pos): elif context.check_token(pos, "GOTO"): i = pos + 1 i = context.skip_ws(i) - while context.check_token(i, ["MULT", "BWISE_AND"]) is True and context.is_operator(i) is False: + while ( + context.check_token(i, ["MULT", "BWISE_AND"]) is True + and context.is_operator(i) is False + ): i += 1 if context.check_token(i, "IDENTIFIER") is False: if context.check_token(i, "LPARENTHESIS") is True: - #parse label value here + # parse label value here i = context.skip_nest(i) elif context.debug == 0: - raise CParsingError("Goto statement should be followed by a label") + raise CParsingError( + "Error: Goto statement should be followed by a label" + ) i += 1 i = context.skip_ws(i) i += 1 @@ -134,17 +136,17 @@ def void_identifier(self, context, pos): def run(self, context): """ - Catches expression statement by elimination + Catches expression statement by elimination """ i = context.skip_ws(0) ret, i = self.check_instruction(context, i) if ret is False: ret, i = self.check_reserved_keywords(context, i) + # if ret is False: + # ret, i = self.check_inc_dec(context, i) if ret is False: - ret, i = self.check_inc_dec(context, i) + ret, i = self.void_identifier(context, i) if ret is False: - ret, i = self.void_identifier(context, i) - if ret is False: - return False, 0 + return False, 0 i = context.eol(i) return True, i diff --git a/norminette/rules/is_func_declaration.py b/norminette/rules/is_func_declaration.py index 6e088810..e4f6be0b 100644 --- a/norminette/rules/is_func_declaration.py +++ b/norminette/rules/is_func_declaration.py @@ -1,21 +1,11 @@ 
-from lexer import Token -from rules import PrimaryRule -from context import GlobalScope, Function, UserDefinedType +from norminette.scope import Function +from norminette.context import GlobalScope +from norminette.rules import Rule, Primary + whitespaces = ["SPACE", "TAB"] -preproc = [ - "DEFINE", - "ERROR", - "ENDIF", - "ELIF", - "IFDEF", - "IFNDEF", - "#IF", - "#ELSE", - "INCLUDE", - "PRAGMA", - "UNDEF" -] + +SEPARATORS = ["COMMA", "AND", "OR", "SEMI_COLON"] assigns = [ "RIGHT_ASSIGN", "LEFT_ASSIGN", @@ -42,7 +32,7 @@ "TYPEDEF", "STRUCT", "ENUM", - "UNION" + "UNION", ] type_identifier = [ "CHAR", @@ -55,11 +45,12 @@ "LONG", "SHORT", ] -class IsFuncDeclaration(PrimaryRule): - def __init__(self): - super().__init__() - self.priority = 60 - self.scope = [GlobalScope] + + +class IsFuncDeclaration(Rule, Primary, priority=81): + scope = ( + GlobalScope, + ) def check_args(self, context, pos): i = context.skip_ws(pos, nl=True) @@ -118,13 +109,31 @@ def check_func_format(self, context): if context.check_token(i, "NEWLINE") is True: return False, 0 while context.peek_token(i): - if context.check_token(i, "NEWLINE") is True and identifier == False and misc_id == [] and type_id == []: + while ( + context.check_token(i, "IDENTIFIER") is True + and context.peek_token(i).value == "__attribute__" + ): + i += 1 + i = context.skip_ws(i) + i = context.skip_nest(i) + i = context.skip_ws(i) + if ( + context.check_token(i, "NEWLINE") is True + and identifier is False + and misc_id == [] + and type_id == [] + ): return False, 0 if context.check_token(i, misc_identifier) is True: misc_id.append(context.peek_token(i)) elif context.check_token(i, type_identifier) is True: type_id.append(context.peek_token(i)) - if context.check_token(i, assigns + ["TYPEDEF", "COMMA", "LBRACE"] + preproc) is True: + if ( + context.check_token( + i, assigns + ["TYPEDEF", "COMMA", "LBRACE", "HASH"] + ) + is True + ): return False, 0 if context.check_token(i, "SEMI_COLON") is True: return False, 0 @@ -133,7 +142,7 @@ def check_func_format(self, context): type_id.append(identifier[0]) identifier = (context.peek_token(i), i) if context.check_token(i, "NEWLINE") is True: - if args == False: + if args is False: i += 1 continue else: @@ -164,14 +173,13 @@ def check_func_format(self, context): i += 1 else: i += 1 - #print (type_id, args, identifier) - if len(type_id) > 0 and args == True and identifier != None: + if len(type_id) > 0 and args is True and identifier is not None: i = identifier[1] i = context.skip_ws(i, nl=True) while context.check_token(i, ["LPARENTHESIS", "MULT", "BWISE_AND"]) is True: i += 1 sc = context.scope - while type(sc) != GlobalScope: + while type(sc) is not GlobalScope: sc = sc.outer() sc.fnames.append(context.peek_token(i).value) context.fname_pos = i @@ -185,34 +193,47 @@ def check_func_format(self, context): i = context.skip_nest(i) i += 1 i = context.skip_ws(i, nl=True) + while context.check_token(i, "LBRACKET"): + i = context.skip_nest(i) + i += 1 + i = context.skip_ws(i, nl=True) + while ( + context.check_token(i, "IDENTIFIER") is True + and context.peek_token(i).value == "__attribute__" + ): + i += 1 + i = context.skip_ws(i) + i = context.skip_nest(i) + 1 + i = context.skip_ws(i) return True, i return False, 0 def run(self, context): """ - Catches function declaration - Allows newline inside it - Creates context variable for function name, arg_start, arg_end + Catches function declaration + Allows newline inside it + Creates context variable for function name, arg_start, arg_end """ if 
type(context.scope) is not GlobalScope: return False, 0 - ret, read = self.check_func_format(context) if ret is False: return False, 0 while context.check_token(read, ["COMMENT", "MULT_COMMENT"]) is True: read += 1 read = context.skip_ws(read, nl=False) - if context.check_token(read, ["NEWLINE", "LBRACE"] + preproc): - if context.check_token(read, ["LBRACE"] + preproc) is True: + if context.check_token(read, ["NEWLINE", "LBRACE", "HASH"]): + if context.check_token(read, ["LBRACE", "HASH"]) is True: read -= 1 context.scope.functions += 1 read += 1 - context.sub = context.scope.inner(Function) + temp = context.skip_ws(read, nl=True) + if context.check_token(temp, "LBRACE"): + context.sub = context.scope.inner(Function) read = context.eol(read) return True, read - elif context.check_token(read, "SEMI_COLON"): + elif context.check_token(read, SEPARATORS): read += 1 read = context.eol(read) return False, 0 diff --git a/norminette/rules/is_func_prototype.py b/norminette/rules/is_func_prototype.py index 6662384e..16f3c66f 100644 --- a/norminette/rules/is_func_prototype.py +++ b/norminette/rules/is_func_prototype.py @@ -1,21 +1,8 @@ -from lexer import Token -from rules import PrimaryRule -from context import GlobalScope, Function, UserDefinedType +from norminette.context import GlobalScope +from norminette.scope import UserDefinedType +from norminette.rules import Rule, Primary whitespaces = ["SPACE", "TAB"] -preproc = [ - "DEFINE", - "ERROR", - "ENDIF", - "ELIF", - "IFDEF", - "IFNDEF", - "#IF", - "#ELSE", - "INCLUDE", - "PRAGMA", - "UNDEF" -] assigns = [ "RIGHT_ASSIGN", "LEFT_ASSIGN", @@ -42,7 +29,7 @@ "TYPEDEF", "STRUCT", "ENUM", - "UNION" + "UNION", ] type_identifier = [ "CHAR", @@ -55,11 +42,12 @@ "LONG", "SHORT", ] -class IsFuncPrototype(PrimaryRule): - def __init__(self): - super().__init__() - self.priority = 80 - self.scope = [GlobalScope, UserDefinedType] + + +class IsFuncPrototype(Rule, Primary, priority=82): + scope = ( + GlobalScope, + ) def check_args(self, context, pos): i = context.skip_ws(pos) @@ -117,12 +105,25 @@ def check_func_format(self, context): identifier = None if context.check_token(i, "NEWLINE") is True: return False, 0 - while context.peek_token(i):# and context.check_token(i, "NEWLINE") is False: + while context.peek_token(i): # and context.check_token(i, "NEWLINE") is False: + while ( + context.check_token(i, "IDENTIFIER") is True + and context.peek_token(i).value == "__attribute__" + ): + i += 1 + i = context.skip_ws(i) + i = context.skip_nest(i) + 1 + i = context.skip_ws(i) if context.check_token(i, misc_identifier) is True: misc_id.append(context.peek_token(i)) elif context.check_token(i, type_identifier) is True: type_id.append(context.peek_token(i)) - if context.check_token(i, assigns + ["TYPEDEF", "COMMA", "LBRACE", "RBRACE"] + preproc) is True: + if ( + context.check_token( + i, assigns + ["TYPEDEF", "COMMA", "LBRACE", "RBRACE", "HASH"] + ) + is True + ): return False, 0 if context.check_token(i, "SEMI_COLON") is True: break @@ -157,17 +158,19 @@ def check_func_format(self, context): break else: i += 1 - if len(type_id) > 0 and args == True and identifier != None: + if len(type_id) > 0 and args is True and identifier is not None: i = identifier[1] while context.check_token(i, ["LPARENTHESIS", "MULT", "BWISE_AND"]) is True: i += 1 sc = context.scope - while type(sc) != GlobalScope: + while type(sc) is not GlobalScope: sc = sc.outer() sc.fnames.append(context.peek_token(i).value) if context.func_alignment == 0: tmp = i - while context.check_token(tmp - 1, 
["LPARENTHESIS", "MULT", "BWISE_AND"]): + while context.check_token( + tmp - 1, ["LPARENTHESIS", "MULT", "BWISE_AND"] + ): tmp -= 1 context.func_alignment = int(context.peek_token(tmp).pos[1] / 4) context.fname_pos = i @@ -184,21 +187,25 @@ def check_func_format(self, context): def run(self, context): """ - Catches function prototypes - Allows newline inside it - End condition is SEMI_COLON token, otherwise line will be considered as - function declaration + Catches function prototypes + Allows newline inside it + End condition is SEMI_COLON token, otherwise line will be considered as + function declaration """ - if type(context.scope) is not GlobalScope and type(context.scope) is not UserDefinedType: + if ( + type(context.scope) is not GlobalScope + and type(context.scope) is not UserDefinedType + ): return False, 0 ret, read = self.check_func_format(context) if ret is False: return False, 0 if context.check_token(read, "IDENTIFIER") is True: - if context.peek_token(read).value == "__attribute__": + while context.peek_token(read).value == "__attribute__": read += 1 read = context.skip_ws(read) read = context.skip_nest(read) + 1 + read = context.skip_ws(read, nl=True) while context.check_token(read, ["COMMENT", "MULT_COMMENT"]) is True: read += 1 read = context.skip_ws(read, nl=False) diff --git a/norminette/rules/is_function_call.py b/norminette/rules/is_function_call.py index aa67955b..e1e48d30 100644 --- a/norminette/rules/is_function_call.py +++ b/norminette/rules/is_function_call.py @@ -1,6 +1,5 @@ -from rules import PrimaryRule -from context import GlobalScope, VariableAssignation -from exceptions import CParsingError +from norminette.rules import Rule, Primary +from norminette.lexer.dictionary import keywords condition_ops = [ "LESS_OR_EQUAL", @@ -28,6 +27,8 @@ "MORE_THAN", ] +SEPARATORS = ["COMMA", "AND", "OR", "SEMI_COLON"] + assign_ops = [ "RIGHT_ASSIGN", "LEFT_ASSIGN", @@ -39,7 +40,7 @@ "AND_ASSIGN", "XOR_ASSIGN", "OR_ASSIGN", - "ASSIGN" + "ASSIGN", ] types = [ @@ -60,7 +61,7 @@ "CONST", "REGISTER", "STATIC", - "VOLATILE" + "VOLATILE", ] op = [ @@ -75,37 +76,33 @@ "INC", "DEC", "PTR", - "DOT" + "DOT", ] ws = ["SPACE", "TAB", "NEWLINE"] -class IsFunctionCall(PrimaryRule): - def __init__(self): - super().__init__() - self.primary = True - self.priority = 55 - self.scope = [] - +class IsFunctionCall(Rule, Primary, priority=80): def run(self, context): """ - Catches function calls when it's in an assignation + Catches function calls when it's in an assignation """ i = context.skip_ws(0, nl=False) types = [] while context.check_token(i, "LPARENTHESIS") is True: - start = i typ, i = context.parenthesis_contain(i) types.append(typ) - if typ == None or typ == "pointer": + if typ is None or typ == "pointer": i = context.skip_ws(i + 1) - if context.peek_token(i) is None or context.check_token(i, "NEWLINE") is True: + if ( + context.peek_token(i) is None + or context.check_token(i, "NEWLINE") is True + ): return False, 0 - #i += 1 + # i += 1 if len(types) > 1: i = context.skip_ws(i, nl=False) - if context.check_token(i, "SEMI_COLON") is True: + if context.check_token(i, SEPARATORS) is True: i += 1 i = context.eol(i) return True, i @@ -119,15 +116,31 @@ def run(self, context): if context.check_token(i, "IDENTIFIER") is True: i += 1 i = context.skip_ws(i) - if context.check_token(i, "LPARENTHESIS") is True: - i = context.skip_nest(i) - while context.peek_token(i) is not None and context.check_token(i, "SEMI_COLON") is False: + if context.check_token(i, "LPARENTHESIS"): + while 
context.check_token(i, "LPARENTHESIS") is True: + i = context.skip_nest(i) + 1 + i = context.skip_ws(i) + if context.check_token(i, "PTR"): # -> + i = context.skip_ws(i + 1) + if context.check_token(i, ("IDENTIFIER", *map(str.upper, keywords))): + i = context.skip_ws(i + 1) + if context.check_token(i, assign_ops): + expected = "SEMI_COLON" + else: + expected = SEPARATORS + while context.peek_token(i) is not None and not context.check_token(i, expected): i += 1 + if context.peek_token(i) is None: + return False, 0 i += 1 i = context.eol(i) return True, i - elif len(types) > 1 and typ == "cast" and (types[-2] == "function" or types[-2] == "pointer"): + elif ( + len(types) > 1 + and typ == "cast" + and (types[-2] == "function" or types[-2] == "pointer") + ): i += 1 i = context.eol(i) return True, i - return False, 0 \ No newline at end of file + return False, 0 diff --git a/norminette/rules/is_label.py b/norminette/rules/is_label.py index cd18d73e..76b70e13 100644 --- a/norminette/rules/is_label.py +++ b/norminette/rules/is_label.py @@ -1,22 +1,21 @@ -from rules import PrimaryRule +from norminette.rules import Rule, Primary -class IsLabel(PrimaryRule): - def __init__(self): - super().__init__() - self.priority = 5 +class IsLabel(Rule, Primary, priority=10): def run(self, context): """ - Catches label and raises norm error whenever + Catches label and raises norm error whenever """ i = context.skip_ws(0) if context.check_token(i, "IDENTIFIER") is False: return False, 0 - i += 1 - i = context.skip_ws(i) + i = context.skip_ws(i + 1) # +1 to skip the identifier if context.check_token(i, "COLON") is False: return False, 0 - while context.peek_token(i) and context.check_token(i, "NEWLINE") is False: + i = context.skip_ws(i + 1) # +1 to skip the colon + if context.check_token(i, "NEWLINE"): + return True, i+1 + while context.peek_token(i) and context.check_token(i, "SEMI_COLON") is False: i += 1 - i = context.eol(i) + i = context.eol(i + 1) # +1 to skip the semi-colon return True, i diff --git a/norminette/rules/is_preprocessor_statement.py b/norminette/rules/is_preprocessor_statement.py index 90d8da6c..06c87d4a 100644 --- a/norminette/rules/is_preprocessor_statement.py +++ b/norminette/rules/is_preprocessor_statement.py @@ -1,48 +1,363 @@ -from rules import PrimaryRule, Rule -import context -from scope import GlobalScope - -pp_keywords = [ - "PRAGMA", - "INCLUDE", - "UNDEF", - "DEFINE", - "#IF", - "ELIF", - "#ELSE", - "IFDEF", - "IFNDEF", - "ENDIF", - "ERROR", - "WARNING", - "IMPORT"] - -whitespaces = ["TAB", "SPACE", "NEWLINE"] - - -class IsPreprocessorStatement(PrimaryRule): - def __init__(self): - super().__init__() - self.priority = 85 - self.scope = [] +import sys +import contextlib +from norminette.rules import Rule, Primary +from norminette.lexer.dictionary import keywords +from norminette.exceptions import CParsingError +from norminette.context import Macro + +UNARY_OPERATORS = ( + "PLUS", + "MINUS", + "NOT", # ! + "BWISE_NOT", # ~ +) + +BINARY_OPERATORS = ( + # Arithmetic operators + "PLUS", + "MINUS", + "MULT", + "DIV", + "MODULO", + # Relational operators + "EQUALS", # == + "NOT_EQUAL", # != + "MORE_THAN", # > (why not GREATER_THAN?) + "LESS_THAN", # < + "GREATER_OR_EQUAL", # >= + "LESS_OR_EQUAL", # <= + # Logical operators + "AND", + "OR", + # Bitwise operators + "BWISE_AND", # & + "BWISE_OR", # | + "BWISE_XOR", # ^ + "BWISE_XOR", # ^ + "LEFT_SHIFT", # << (why not BWISE_LEFT?) 
+ "RIGHT_SHIFT", # >> +) + +ALLOWED_IN_PATH = ( + "IDENTIFIER", + "DIV", + "MINUS", + "DOT", + "SPACE", + "TAB", + # TODO Remove all keyword tokens and add to just use 'IDENTIFIER' instead + *keywords.values(), # https://github.com/42School/norminette/issues/470 +) + + +@contextlib.contextmanager +def recursion_limit(limit): + old_limit = sys.getrecursionlimit() + sys.setrecursionlimit(limit) + yield + sys.setrecursionlimit(old_limit) + + +class IsPreprocessorStatement(Rule, Primary, priority=100): def run(self, context): """ - Catches any kind of preprocessor statements - Handles indentation related informations + Catches any kind of preprocessor statements + Handles indentation related informations """ i = context.skip_ws(0) - if context.check_token(i, pp_keywords) is True: - if context.peek_token(i).value is None \ - or context.peek_token(i).value.startswith("#") is False: - return False, 0 - if context.check_token(i, ["IFDEF", "IFNDEF"]): - context.preproc_scope_indent += 1 - elif context.check_token(i, "ENDIF") \ - and context.preproc_scope_indent > 0: - context.preproc_scope_indent -= 1 - i += 1 - else: + if not context.check_token(i, "HASH"): return False, 0 - i = context.eol(i) - return True, i + self.hash = context.peek_token(i) + i += 1 + i = context.skip_ws(i) + if context.check_token(i, "NEWLINE"): # Null directive + return True, i + 1 # TODO: Fix null directives (comments) + # Why `if` and `else` need to be a special case? + if not context.check_token(i, ("IDENTIFIER", "IF", "ELSE")): + raise CParsingError(f"Invalid preprocessor statement {context.peek_token(i)}") + token = context.peek_token(i) + direc = (token.value if token.type == "IDENTIFIER" else token.type).lower() + i += 1 + i = context.skip_ws(i) + if checker := getattr(self, f"check_{direc}", None): + return checker(context, i) + raise CParsingError(f"Invalid preprocessing directive #{direc}") + + def check_define(self, context, index): + if not context.check_token(index, "IDENTIFIER"): + raise CParsingError("No identifier after #define") + token = context.peek_token(index) + index += 1 + if is_function := context.check_token(index, "LPARENTHESIS"): + index += 1 + index = context.skip_ws(index) + while context.check_token(index, "IDENTIFIER"): + index += 1 + index = context.skip_ws(index) + if context.check_token(index, "COMMA"): + index += 1 + index = context.skip_ws(index) + # Add better errors like check EOF and invalid identifier? 
+ if not context.check_token(index, "RPARENTHESIS"): + raise CParsingError("Invalid macro function definition") + index += 1 + macro = Macro.from_token(token, is_func=is_function) + context.preproc.macros.append(macro) + index = context.skip_ws(index) + return self._just_token_string("define", context, index) + + def check_import(self, context, index): + is_valid_argument, index = self._check_path(context, index) + if not is_valid_argument: + raise CParsingError("Invalid file argument for #import directive") + index = context.skip_ws(index) + return self._just_token_string("import", context, index) + + def check_pragma(self, context, index): + return self._just_token_string("pragma", context, index) + + def check_error(self, context, index): + return self._just_token_string("error", context, index) + + def check_warning(self, context, index): + return self._just_token_string("warning", context, index) + + def check_if(self, context, index): + if not self.corresponding_endif(context, index): + context.new_error("PREPROC_BAD_IF", self.hash) + context.preproc.indent += 1 + context.preproc.total_ifs += 1 + return self._just_constant_expression("if", context, index) + + def check_elif(self, context, index): + if not self.corresponding_endif(context, index): + context.new_error("PREPROC_BAD_ELIF", self.hash) + context.preproc.total_elifs += 1 + return self._just_constant_expression("elif", context, index) + + def check_ifdef(self, context, index): + if not self.corresponding_endif(context, index): + context.new_error("PREPROC_BAD_IFDEF", self.hash) + context.preproc.indent += 1 + context.preproc.total_ifdefs += 1 + return self._just_identifier("ifdef", context, index) + + def check_ifndef(self, context, index): + if not self.corresponding_endif(context, index): + context.new_error("PREPROC_BAD_IFNDEF", self.hash) + context.preproc.indent += 1 + context.preproc.total_ifndefs += 1 + return self._just_identifier("ifndef", context, index) + + def check_undef(self, context, index): + return self._just_identifier("undef", context, index) + + def check_else(self, context, index): + if context.preproc.indent == 0: + context.new_error("PREPROC_BAD_ELSE", self.hash) + context.preproc.total_elses += 1 + return self._just_eol("else", context, index) + + def check_endif(self, context, index): + if context.preproc.indent == 0: + context.new_error("PREPROC_BAD_ENDIF", self.hash) + context.preproc.indent -= 1 + return self._just_eol("endif", context, index) + + def check_include(self, context, index): + is_valid_argument, index = self._check_path(context, index) + if not is_valid_argument: + raise CParsingError("Invalid file argument for #include directive") + return self._just_eol("include", context, index) + + def corresponding_endif(self, context, index): + """Checks if the corresponding `#endif` is present. + """ + depth = 0 + while index < len(context.tokens): + if not context.check_token(index, "HASH"): + index += 1 + continue + + index += 1 + index = context.skip_ws(index) + if not context.check_token(index, ("IDENTIFIER", "IF", "ELSE")): + continue + + token = context.peek_token(index) + direc = (token.value if token.type == "IDENTIFIER" else token.type).lower() + if direc == "endif": + if depth == 0: + return True + depth -= 1 + elif direc in ("if", "ifdef", "ifndef"): + depth += 1 + index += 1 + return False + + def _check_path(self, context, index): + """Checks the argument of an include/import statement.
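For illustration, a minimal standalone sketch of the path check described above, assuming a plain list of token-type strings instead of norminette's Token/context API; the helper name, the reduced token set, and the sample inputs are assumptions made for this sketch, not part of the patch.

```python
# Hedged sketch of the include/import path grammar: either a single STRING
# token, or a LESS_THAN ... MORE_THAN pair wrapping path-like tokens.
# (The real _check_path also skips whitespace and tolerates keyword tokens.)
ALLOWED_IN_PATH_SKETCH = {"IDENTIFIER", "DIV", "MINUS", "DOT", "SPACE", "TAB"}

def looks_like_include_path(token_types):
    if token_types and token_types[0] == "STRING":
        return True
    if not token_types or token_types[0] != "LESS_THAN":
        return False
    i = 1
    while i < len(token_types) and token_types[i] in ALLOWED_IN_PATH_SKETCH:
        i += 1
    return i < len(token_types) and token_types[i] == "MORE_THAN"

assert looks_like_include_path(["STRING"])                               # "libft.h"
assert looks_like_include_path(["LESS_THAN", "SPACE", "IDENTIFIER",
                                "DOT", "IDENTIFIER", "MORE_THAN"])       # < bla.h >
assert not looks_like_include_path(["IDENTIFIER", "DOT", "IDENTIFIER"])  # bare path
```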
+ + Examples of valid headers: + - `"libft.h"` + - `< bla.h >` + - `<42.h >` + - `< four.two>` + """ + # TODO: It does not work with `#include ` because of the `if` keyword + if context.check_token(index, "STRING"): + index += 1 + return True, index + if not context.check_token(index, "LESS_THAN"): + return False, index + index = context.skip_ws(index + 1) + while context.check_token(index, ALLOWED_IN_PATH): + index += 1 + if not context.check_token(index, "MORE_THAN"): + return False, index + index += 1 + return True, index + + def _just_token_string(self, directive, context, index): + index = context.skip_ws(index, comment=True) + if context.check_token(index, "NEWLINE"): + index += 1 + return True, index + lines = 1 + newline = False + while context.peek_token(index) is not None and lines > 0: + if context.check_token(index, "NEWLINE"): + lines -= 1 + newline = False + elif context.check_token(index, "BACKSLASH") and not newline: + lines += 1 + newline = True + index += 1 + if lines > 0 and context.peek_token(index) is not None: + raise CParsingError(f"Unexpected end of file after #{directive} directive") + return True, index + + def _just_constant_expression(self, directive, context, index): + parser = ConstantExpressionParser(directive, context, index) + ok, index = parser.parse() + if not ok: + return ok, index + return self._just_eol(directive, context, index) + + def _just_identifier(self, directive, context, index): + if not context.check_token(index, "IDENTIFIER"): + raise CParsingError(f"Invalid argument for #{directive} statement") + index += 1 + return self._just_eol(directive, context, index) + + def _just_eol(self, directive, context, index): + index = context.skip_ws(index, comment=True) + if context.peek_token(index) is None: + return True, index + # raise CParsingError(f"Unexpected end of file after #{directive} directive") + if not context.check_token(index, "NEWLINE"): + raise CParsingError(f"Extra tokens at end of #{directive} directive") + index += 1 + return True, index + + +class ConstantExpressionParser: + """Parses a constant expression that can be used in preprocessor statements. + + ```bnf + ::= + | unary_operator + | ( "(" ")" | ) ( binary_operator )* + ::= string + | constant + | identifier + | identifier '(' [ ("," )* ] ')' + ``` The `string`, `constant` and `identifier` come from the tokenizer.
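As a rough illustration of the grammar sketched above (the operator sets, the `(type, value)` token representation, and the function names here are assumptions for this example, and function-like macro arguments are omitted for brevity), a tiny standalone recognizer in the same recursive-descent style:

```python
# Hedged sketch: recognizes a simplified form of the constant-expression
# grammar over (type, value) pairs with whitespace already removed.
# Returns the index reached on success, or -1 on failure.
UNARY = {"PLUS", "MINUS", "NOT", "BWISE_NOT"}
BINARY = {"PLUS", "MINUS", "MULT", "DIV", "AND", "OR", "EQUALS", "LESS_THAN", "MORE_THAN"}
ATOMS = {"STRING", "CONSTANT", "IDENTIFIER"}

def accepts(tokens, i=0):
    if i >= len(tokens):
        return -1
    kind, value = tokens[i]
    if kind == "LPARENTHESIS":                      # "(" expression ")"
        i = accepts(tokens, i + 1)
        if i == -1 or i >= len(tokens) or tokens[i][0] != "RPARENTHESIS":
            return -1
        i += 1
    elif kind in UNARY or (kind, value) == ("IDENTIFIER", "defined"):
        return accepts(tokens, i + 1)               # unary operator / defined(...)
    elif kind in ATOMS:
        i += 1                                      # string | constant | identifier
    else:
        return -1
    if i < len(tokens) and tokens[i][0] in BINARY:  # optional binary-operator tail
        return accepts(tokens, i + 1)
    return i

# e.g. the condition of `#if defined(FOO) && (BAR + 1)`:
cond = [("IDENTIFIER", "defined"), ("LPARENTHESIS", "("), ("IDENTIFIER", "FOO"),
        ("RPARENTHESIS", ")"), ("AND", "&&"), ("LPARENTHESIS", "("),
        ("IDENTIFIER", "BAR"), ("PLUS", "+"), ("CONSTANT", "1"), ("RPARENTHESIS", ")")]
assert accepts(cond) == len(cond)
```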
+ """ + + def __init__(self, directive, context, index): + self.directive = directive + self.context = context + self.index = index + + def parse(self): + try: + index = self.index + self.parse_constant_expression() + if index == self.index: # No tokens were parsed + raise CParsingError(f"No argument for #{self.directive} statement") + if self.context.peek_token(self.index) is None: + raise CParsingError("Unexpected end of file while parsing constant expression") + self.index = self.context.skip_ws(self.index, comment=True) + if not self.context.check_token(self.index, "NEWLINE"): + raise CParsingError("Unexpected tokens after the constant expression") + # self.index += 1 # Skip the newline + except RecursionError: + raise CParsingError("Constant expression too complex") + return True, self.index + + def skip_ws(self): + self.index = self.context.skip_ws(self.index) + + @recursion_limit(100) + def parse_constant_expression(self): + self.parse_expression() + + def parse_expression(self): + if self.context.check_token(self.index, "LPARENTHESIS"): + self.index += 1 + self.parse_expression() + if not self.context.check_token(self.index, "RPARENTHESIS"): + raise CParsingError("Missing closing parenthesis while parsing constant expression") + self.index += 1 + self.parse_potential_binary_operator() + return + + if ( + self.context.check_token(self.index, UNARY_OPERATORS) + or ( + self.context.check_token(self.index, "IDENTIFIER") + and self.context.peek_token(self.index).value == "defined" + ) + ): + self.index += 1 + self.skip_ws() + self.parse_expression() + return + + if self.context.check_token(self.index, "IDENTIFIER"): + self.index += 1 + if self.context.check_token(self.index, "LPARENTHESIS"): + self.index += 1 + self.parse_function_macro() + return + self.parse_potential_binary_operator() + return + + if self.context.check_token(self.index, ("STRING", "CONSTANT", "CHAR_CONST")): + self.index += 1 + self.parse_potential_binary_operator() + return + + def parse_function_macro(self): + self.skip_ws() + if not self.context.check_token(self.index, "RPARENTHESIS"): + self.parse_expression() + while self.context.check_token(self.index, "COMMA"): + self.index += 1 + self.skip_ws() + self.parse_expression() + if not self.context.check_token(self.index, "RPARENTHESIS"): + raise CParsingError("Missing closing parenthesis") + self.index += 1 + self.skip_ws() + + def parse_potential_binary_operator(self): + self.skip_ws() + if self.context.check_token(self.index, BINARY_OPERATORS): + self.index += 1 + self.skip_ws() + self.parse_expression() + return diff --git a/norminette/rules/is_ternary.py b/norminette/rules/is_ternary.py index 5ffa5f9c..59e30087 100644 --- a/norminette/rules/is_ternary.py +++ b/norminette/rules/is_ternary.py @@ -1,21 +1,15 @@ -from lexer import Token -from rules import Rule -import string +from norminette.rules import Rule, Primary -class IsTernary(Rule): - def __init__(self): - super().__init__() - self.priority = 50 - self.scope = [] +class IsTernary(Rule, Primary, priority=53): def run(self, context): """ - Catches ternaries and raises an error + Catches ternaries and raises an error """ i = 0 - while context.peek_token(i) is not None and context.check_token(i, "SEMI_COLON") is False: + while context.peek_token(i) is not None and context.check_token(i, ["SEMI_COLON", "NEWLINE"]) is False: if context.check_token(i, "TERN_CONDITION") is True: - while context.peek_token(i) is not None and context.check_token(i, "SEMI_COLON") is False: + while context.peek_token(i) is not None 
and context.check_token(i, ["SEMI_COLON", "NEWLINE"]) is False: i += 1 i += 1 i = context.eol(i) diff --git a/norminette/rules/is_user_defined_type.py b/norminette/rules/is_user_defined_type.py index b90aa271..97ab6b2b 100644 --- a/norminette/rules/is_user_defined_type.py +++ b/norminette/rules/is_user_defined_type.py @@ -1,19 +1,10 @@ -from lexer import Token -from rules import PrimaryRule -from context import GlobalScope, UserDefinedType -from exceptions import CParsingError -from scope import * +from norminette.rules import Rule, Primary +from norminette.scope import UserDefinedType, UserDefinedEnum utypes = ["TYPEDEF", "UNION", "STRUCT", "ENUM"] -class IsUserDefinedType(PrimaryRule): - def __init__(self): - super().__init__() - self.priority = 45 - self.scope = [GlobalScope, UserDefinedType] - - +class IsUserDefinedType(Rule, Primary, priority=45): def typedef(self, context, pos): i = context.skip_ws(pos) if "TYPEDEF" not in [tkn.type for tkn in context.tokens[:i]]: @@ -32,18 +23,17 @@ def utype_definition(self, context, pos): if not [tkn for tkn in context.tokens[:pos] if tkn.type in utypes]: return False, pos return True, pos - i = context.skip_ws(i) - return ret, i def run(self, context): """ - Catches user type definitions - Can include the whole type definition in case it's a structure - Variable declarations aren't included + Catches user type definitions + Can include the whole type definition in case it's a structure + Variable declarations aren't included """ i = context.skip_ws(0, nl=False) enum = False p = 0 + ids = [] while context.peek_token(i) is not None: if context.check_token(i, utypes) is True and p <= 0: break @@ -51,7 +41,7 @@ def run(self, context): p += 1 if context.check_token(i, "RPARENTHESIS") is True: p -= 1 - if context.check_token(i, ['NEWLINE', 'SEMI_COLON']) is True: + if context.check_token(i, ["NEWLINE", "SEMI_COLON"]) is True: return False, 0 i += 1 if context.peek_token(i) is None: @@ -66,9 +56,11 @@ def run(self, context): enum = True if context.check_token(i, ["NEWLINE", "SEMI_COLON"]) is True and p == 0: break + if context.check_token(i, "IDENTIFIER"): + ids.append(context.peek_token(i)) i += 1 if context.check_token(i, "NEWLINE") is True and p <= 0: - if enum == True: + if enum is True: context.sub = context.scope.inner(UserDefinedEnum) else: context.sub = context.scope.inner(UserDefinedType) @@ -76,5 +68,6 @@ def run(self, context): return True, i elif context.check_token(i, "SEMI_COLON") is True: i += 1 + context.scope.vars_name.append(ids[-1]) i = context.eol(i) return True, i diff --git a/norminette/rules/is_var_declaration.py b/norminette/rules/is_var_declaration.py index c3eea4e1..93ae9d64 100644 --- a/norminette/rules/is_var_declaration.py +++ b/norminette/rules/is_var_declaration.py @@ -1,7 +1,8 @@ -from lexer import Token -from rules import PrimaryRule -from context import GlobalScope, UserDefinedType, ControlStructure, Function - +from norminette.context import ControlStructure +from norminette.scope import Function +from norminette.context import GlobalScope +from norminette.scope import UserDefinedType +from norminette.rules import Rule, Primary lbrackets = ["LBRACE", "LPARENTHESIS", "LBRACKET"] rbrackets = ["RBRACE", "RPARENTHESIS", "RBRACKET"] @@ -13,7 +14,7 @@ "VOLATILE", "EXTERN", "INLINE", - "RESTRICT" + "RESTRICT", "SIGNED", "UNSIGNED", ] @@ -30,14 +31,17 @@ "SHORT", "STRUCT", "ENUM", - "UNION" + "UNION", ] -class IsVarDeclaration(PrimaryRule): - def __init__(self): - super().__init__() - self.priority = 75 - self.scope = 
[GlobalScope, UserDefinedType, Function, ControlStructure] + +class IsVarDeclaration(Rule, Primary, priority=75): + scope = ( + GlobalScope, + UserDefinedType, + Function, + ControlStructure, + ) def assignment_right_side(self, context, pos): sep = ["COMMA", "SEMI_COLON", "ASSIGN"] @@ -49,19 +53,31 @@ def assignment_right_side(self, context, pos): return True, i def var_declaration(self, context, pos, identifier=False): - pclose = ["RPARENTHESIS", "NEWLINE", "SPACE", "TAB"] brackets = 0 parenthesis = 0 braces = 0 i = pos ret_store = None - while context.peek_token(i) is not None and context.check_token(i, ["COMMA", "SEMI_COLON"]) is False: - if context.check_token(i, "IDENTIFIER") is True and braces == 0 and brackets == 0 and parenthesis == 0: + ids = [] + while ( + context.peek_token(i) is not None + and context.check_token(i, ["SEMI_COLON"]) is False + ): + if ( + context.check_token(i, "IDENTIFIER") is True + and braces == 0 + and brackets == 0 + and parenthesis == 0 + ): identifier = True + ids.append(context.peek_token(i)) elif context.check_token(i, ["COMMENT", "MULT_COMMENT"]) is True: i += 1 continue - elif context.check_token(i, ["COLON", "CONSTANT"]) is True and identifier == True: + elif ( + context.check_token(i, ["COLON", "CONSTANT"]) is True + and identifier is True + ): i += 1 continue elif context.check_token(i, lbrackets) is True: @@ -69,11 +85,25 @@ def var_declaration(self, context, pos, identifier=False): braces += 1 if context.check_token(i, "LBRACKET") is True: brackets += 1 - if context.check_token(i, "LPARENTHESIS") is True: + if ( + context.check_token(i, "LPARENTHESIS") is True + and brackets == 0 + and braces == 0 + ): ret, tmp = context.parenthesis_contain(i, ret_store) - if ret == 'function' or ret == 'pointer': + if ret == "function" or ret == "pointer" or ret == "var": ret_store = ret identifier = True + tmp2 = tmp - 1 + deep = 1 + while tmp2 > 0 and deep > 0: + if context.check_token(tmp2, "IDENTIFIER"): + ids.append(context.peek_token(tmp2)) + if context.check_token(tmp2, "RPARENTHESIS"): + deep += 1 + if context.check_token(tmp2, "LPARENTHESIS"): + deep -= 1 + tmp2 -= 1 i = tmp else: parenthesis += 1 @@ -85,21 +115,34 @@ def var_declaration(self, context, pos, identifier=False): if context.check_token(i, "RPARENTHESIS") is True: parenthesis -= 1 elif context.check_token(i, "ASSIGN") is True: - if identifier == False: + if identifier is False: return False, pos ret, i = self.assignment_right_side(context, i + 1) i -= 1 if ret is False: return False, pos - elif context.check_token(i, ['SPACE', "TAB", "MULT", "BWISE_AND", "NEWLINE"] + misc_specifiers + type_specifiers): + elif context.check_token( + i, + ["SPACE", "TAB", "MULT", "BWISE_AND", "NEWLINE"] + + misc_specifiers + + type_specifiers, + ): pass + elif ( + context.check_token(i, "COMMA") is True + and parenthesis == 0 + and brackets == 0 + and braces == 0 + ): + break elif parenthesis == 0 and brackets == 0 and braces == 0: return False, 0 i += 1 - if identifier == False or braces > 0 or brackets > 0 or parenthesis > 0: + if identifier is False or braces > 0 or brackets > 0 or parenthesis > 0: return False, 0 + context.scope.vars_name.append(ids[-1]) if context.check_token(i, "SEMI_COLON") is True: - if brackets == 0 and braces == 0 and parenthesis == 0: + if brackets <= 0 and braces <= 0 and parenthesis <= 0: return True, i else: return False, 0 @@ -110,13 +153,13 @@ def var_declaration(self, context, pos, identifier=False): def is_func_pointer(self, context, pos): i = context.skip_ws(pos) - ws = 
['SPACE', "TAB", "NEWLINE"] + ws = ["SPACE", "TAB", "NEWLINE"] if context.check_token(i, "LPARENTHESIS") is False: return False, pos identifier = False i += 1 p = 1 - plvl= 0 # nesting level of the first pointer operator encountered + plvl = 0 # nesting level of the first pointer operator encountered while p and context.check_token(i, ["MULT", "LPARENTHESIS"] + ws): if context.check_token(i, "MULT") and not plvl: @@ -133,7 +176,6 @@ def is_func_pointer(self, context, pos): elif context.check_token(i, "RPARENTHESIS") is True: p -= 1 if identifier is True: - par_pos = i break elif context.check_token(i, "IDENTIFIER") is True: identifier = True @@ -146,15 +188,21 @@ def is_func_pointer(self, context, pos): def run(self, context): """ - Catches all kinds of variable declarations + Catches all kinds of variable declarations """ ret, i = context.check_type_specifier(0) + i = context.skip_ws(i) if ret is False: return False, 0 tmp = i - 1 while context.check_token(tmp, ["LPARENTHESIS", "MULT", "BWISE_AND"]): tmp -= 1 - if context.check_token(tmp, ['SPACE', 'TAB']) is False and context.check_token(tmp - 1, ['SPACE', 'TAB']) is False: + if context.check_token(tmp, "SEMI_COLON"): + return True, i + if ( + context.check_token(tmp, ["SPACE", "TAB", "NEWLINE"]) is False + and context.check_token(tmp - 1, ["SPACE", "TAB", "NEWLINE"]) is False + ): return False, 0 ret, i = self.var_declaration(context, i) if ret is False: diff --git a/norminette/rules/rule.py b/norminette/rules/rule.py index 2820dcc6..7c0465c1 100644 --- a/norminette/rules/rule.py +++ b/norminette/rules/rule.py @@ -1,30 +1,89 @@ +from typing import Tuple, Any + +from norminette.context import Context + class Rule: - def __init__(self): - self.name = type(self).__name__ - self.depends_on = [] - self.primary = False - - def register(self, registry): - if self.depends_on == []: - if 'all' in registry.dependencies: - registry.dependencies['all'].append(self.name) - else: - registry.dependencies['all'] = [self.name] - - for rule in self.depends_on: - if rule in registry.dependencies: - registry.dependencies[rule].append(self.name) - else: - registry.dependencies[rule] = [self.name] - - -class PrimaryRule(Rule): - def __init__(self): - super().__init__() - self.primary = True - self.priority = 0 - self.scope = [] - - def run(self, context): + __slots__ = () + + def __new__(cls, context: Context, *args, **kwargs): + cls.context = context + cls.name = cls.__name__ + + return super().__new__(cls, *args, **kwargs) + + def __repr__(self) -> str: + return self.name + + def __hash__(self) -> int: + return hash(self.name) + + def __eq__(self, value: Any) -> bool: + if isinstance(value, str): + return self.name == value + if isinstance(value, Rule): + return self.name == value.name + return super().__eq__(value) + + def __ne__(self, value: Any) -> bool: + return not (self == value) + + +class Check: + __slots__ = () + + depends_on: Tuple[str, ...] 
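+    # Primary rule names this check depends on; register() queues the check
+    # under each of them. When it is empty, `runs_on_rule` defaults to True
+    # (see __init_subclass__ below).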
+ + runs_on_start: bool + runs_on_rule: bool + runs_on_end: bool + + def __init_subclass__(cls, **kwargs): + if not hasattr(cls, "depends_on"): + cls.depends_on = () + cls.runs_on_start = kwargs.pop("runs_on_start", getattr(cls, "runs_on_start", False)) + cls.runs_on_rule = kwargs.pop("runs_on_rule", getattr(cls, "runs_on_rule", not cls.depends_on)) + cls.runs_on_end = kwargs.pop("runs_on_end", getattr(cls, "runs_on_end", False)) + + @classmethod + def register(cls, registry): + for rule in cls.depends_on: + registry.dependencies[rule].append(cls) + if cls.runs_on_start: + registry.dependencies["_start"].append(cls) + if cls.runs_on_rule: + registry.dependencies["_rule"].append(cls) + if cls.runs_on_end: + registry.dependencies["_end"].append(cls) + + def is_starting(self): + """Returns if this `Check` is being run before `Primary`. + + It is only called if `runs_on_start` is set to `True`. + """ + return self.context.state == "starting" # type: ignore + + def is_ending(self): + """Returns if this `Check` is being run after all rules. + + It is only called if `runs_on_end` is set to `True`. + """ + return self.context.state == "ending" # type: ignore + + def run(self, context: Context) -> None: + return + + +class Primary: + __slots__ = () + + priority: int + scope: Tuple[str, ...] + + def __init_subclass__(cls, **kwargs: Any): + cls.priority = kwargs.pop("priority", 0) + if not hasattr(cls, "scope"): + cls.scope = () + + def run(self, context: Context) -> Tuple[bool, int]: return False, 0 diff --git a/norminette/run_test.sh b/norminette/run_test.sh deleted file mode 100755 index 16b2a1a8..00000000 --- a/norminette/run_test.sh +++ /dev/null @@ -1,6 +0,0 @@ -echo "Running lexer unit test:" -python -m unittest discover tests/lexer/unit-tests/ "*.py" -echo "Running lexer test on files:" -python -m tests.lexer.files.file_token_test -python -m tests.lexer.errors.tester -python -m tests.rules.rule_tester diff --git a/norminette/scope.py b/norminette/scope.py index 1db98de0..a7ad9402 100644 --- a/norminette/scope.py +++ b/norminette/scope.py @@ -1,8 +1,9 @@ class Scope: """ - Main scope class - Contain various scope informations updated as the norminette runs through the file + Main scope class + Contain various scope informations updated as the norminette runs through the file """ + def __init__(self, parent=None): self.parent = parent self.name = type(self).__name__ @@ -13,6 +14,7 @@ def __init__(self, parent=None): # ########################################################## # self.vdeclarations_allowed = False self.vars = 0 + self.vars_name = [] self.vars_alignment = 0 self.func_alignment = 0 # ########################################################## # @@ -27,27 +29,41 @@ def inner(self, sub): def outer(self): """ - Return outer scope (None if called on GlobalScope) - Adds the line of current scope to parent scope - to calculate function length or control structure length + Return outer scope (None if called on GlobalScope) + Adds the line of current scope to parent scope + to calculate function length or control structure length """ if self.parent is not None: self.parent.lines += self.lines - #print (f"{self.name} -> {self.parent.name}") + # print (f"{self.name} -> {self.parent.name}") return self.parent + def __eq__(self, value) -> bool: + if isinstance(value, str): + return self.name == value + if issubclass(value, Scope): + return self.name == value.__name__ + if hasattr(value, "name"): + return self.name == value.name + return super().__eq__(value) + + def __ne__(self, value) -> bool: 
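+        # Mirrors __eq__ above, so comparisons against strings, scope classes
+        # and named objects stay consistent.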
+ return not (self == value) + def get_outer(self): """ - Allows to peek to the parent scope without adding lines to - the parent scope + Allows to peek to the parent scope without adding lines to + the parent scope """ return self.parent + class GlobalScope(Scope): """ - GlobalScope contains every other scope - Has no parent scope (returns None) + GlobalScope contains every other scope + Has no parent scope (returns None) """ + def __init__(self): super().__init__() self.fdeclarations_allowed = True @@ -56,11 +72,13 @@ def __init__(self): self.func_alignment = 0 self.include_allowed = True + class Function(Scope): """ Function definition scope, anything between the opening/closing braces of a function """ + def __init__(self, parent): super().__init__(parent) self.fname_pos = 0 @@ -75,6 +93,7 @@ class ControlStructure(Scope): only one instruction, if that instruction creates a new sub scope, it can contain as many instruction as that scope can "hold" """ + def __init__(self, parent, multiline=False): super().__init__(parent) self.multiline = multiline @@ -85,15 +104,18 @@ class UserDefinedType(Scope): User defined type scope (struct, union, enum), only variables declarations are allowed within this scope """ + def __init__(self, parent, typedef=False): super().__init__(parent) self.typedef = typedef + class UserDefinedEnum(Scope): """ User defined type scope (struct, union, enum), only variables declarations are allowed within this scope """ + def __init__(self, parent, typedef=False): super().__init__(parent) self.typedef = typedef @@ -105,4 +127,5 @@ class VariableAssignation(Scope): assignations (int foo[4] = {0, 0, 0, 0};) easier. - Unused """ + pass diff --git a/norminette/tests/__init__.py b/norminette/tests/__init__.py deleted file mode 100644 index 2f409eb5..00000000 --- a/norminette/tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Rule testing suite diff --git a/norminette/tests/a.c b/norminette/tests/a.c deleted file mode 100644 index f6a149c3..00000000 --- a/norminette/tests/a.c +++ /dev/null @@ -1,63 +0,0 @@ -int (foo(int a)) -{ - if (1) - { - -} - return (1); -} - -short int a() { return 1; } - -int (faa)(int *a(int), char b, int c, int r) -{ - return 1; -} - -#include -int *truc() -{ - return malloc(sizeof(int)); -} - -int (*f2(void))(int) { - return foo; } - -int ((*((((bar)(int a))))))(int) -{ - return (foo); -} - -int (*(f)(char a, int t, int b))(int) { return foo;} - -int (*fp(int)); //Function pointer, NOT FUNC! 
-int ((*(fp2))(int a)); - -//int ((*T)(int))(int); - -int ((*f23(int))) ; -int (*fff[1])(void); -enum Bla { - A, - B -}; - -enum Bla func(void); -unsigned enum Bla func(void); -long long enum Bla func(void); -enum long long Bla func(void); -enum long long Bla func(void); - Bla func(void); -youpi func2(void); -trololo43 const func3(youpibanane cahuete); -trololo43 func3(youpibanane); -trololo43 func3(cahuete); -trololo43 func3(youpibanane cahuete); -trololo43 func3(youpibanane cahuete, lol choupette); -trololo43 func3(youpibanane cahuete, lol choupette); -trololo43 func3(youpibanane cahuete,lol choupette); -trololo43 func3(youpibanane cahuete, lol ***choupette); -trololo43 func3(youpibanane cahuete, lol * choupette); -trololo43 func3(youpibanane **cahuete, lol*** **choupette); -trololo trololol func4(uopi sks); -***func4(udidf fdfd); diff --git a/norminette/tests/c.c b/norminette/tests/c.c deleted file mode 100644 index 83eca060..00000000 --- a/norminette/tests/c.c +++ /dev/null @@ -1,64 +0,0 @@ -#define foo 1 void bar(struct foo *a, struct foo **b) -{ - int a; - int b; -} - -int f(void) -{ -return 1; -} - -int a = f(); - -struct foo { -int a, b, c; -}; - -typedef struct f0o { - int a, b, c; -} t_foo; - -t_foo variable = { - 1, 2, 3 -}; - -typedef int POUET; - - -POUET a = 5; - -typedef struct bar { - int a; - int b; - char truc; -} t_bar; - -void bar(struct foo *a, struct foo **b) -{ - (void)a, (void)b; -} - -struct mystruct; - - -int write(int a, char *p, int n); - -struct mystruct { -int a, b, c; -}; - -#include -int main(void) { - int a, b = 5, c, d, e; - - b = 2, c =3, write(1, "A", 2);; - (void)1; - while (1) - if (1) - if (1) - if (1) - write(1,"B", 1); - - printf("%lu", sizeof(struct mystruct)); -} diff --git a/norminette/tests/func.c b/norminette/tests/func.c deleted file mode 100644 index 19a65304..00000000 --- a/norminette/tests/func.c +++ /dev/null @@ -1,39 +0,0 @@ -#include -/* basic func */ - int foo(int a) { return 1; } - -/* basic func, name and params wrapped in parentheses */ -int (foo2(int a)) -{ return 1; } - - -/* basic func, name wrapped in parentheses - * */ -int (*foo3(int *f(int), int a)) { return (int*)0; } - -/* basic func, name and params wrapped in way too much parentheses */ -int ((((foo4(int a))))) { return 1; } - -/* basic func, returning a pointer*/ -int *foo5(){ return malloc(sizeof(int)); } - -/* func returning a func pointer*/ -int(*foo6(int a, int b, int *z, int c, int d))(int) -{ -return foo; -} - -/* func returning a func pointer wrapped in way too much parentheses */ -int (((*foo7(int a, - int b))(int))) { return foo; } - - -/* func returning a func pointer, wrapped in way too much parentheses #2 */ -int (((*foOo8(void))(int))) -{ -int a(); -return foo; -} -/* func pointers, not funcs!!*/ -//int *(foo9)(int); -//int (*(foo10)(int)); diff --git a/norminette/tests/functest.c b/norminette/tests/functest.c deleted file mode 100644 index 03123b89..00000000 --- a/norminette/tests/functest.c +++ /dev/null @@ -1,44 +0,0 @@ -#include -int foo(int a[2]); -int (foobar(int a)) { return 1;} -int (truc(int a)) { return a; } -int bar(int a,int b, ...); -int ((((foo2bar(int a,int b ,int c))))) -{ - return 1; -} -int f(int a); -int func(int a); -char (*s1(int a[1])) -{ - return malloc(sizeof(char) * 1); -} - - -//int (*fp(int a))(int) { return foo; } - - -//int ((*T)(int))(int); -int (*p)(int) = 0; -int ((*p)(int))(int) { return f;} - -//int ((*fppppp)(int a)) {return 1;} - -#include -#include - -char * (s(void)) -{ - return strdup("yo!\n"); -} - -int main(void) -{ - 
int ((*fp)(int)); - int (*ffp)(); - int **fpp(int *f(int)); - - fp = f; - write(1, s(), strlen(s())); - return 1; -} diff --git a/norminette/tests/ko_struct_name.out b/norminette/tests/ko_struct_name.out deleted file mode 100644 index 8d00d40a..00000000 --- a/norminette/tests/ko_struct_name.out +++ /dev/null @@ -1,12 +0,0 @@ -tests/rules/ko_struct_name.c: KO! - ESTRUCT_TYPE_NAMING (line: 1, col: 16): Structure name must start with s_ - EUSER_DEFINED_TYPEDEF (line: 1, col: 23): User defined typedef must start with t_ - EGLOBAL_VAR_NAMING (line: 2, col: 19): Global variable must start with g_ - EGLOBAL_VAR_NAMING (line: 3, col: 10): Global variable must start with g_ - ESTRUCT_TYPE_NAMING (line: 5, col: 16): Structure name must start with s_ - ESTRUCT_TYPE_NAMING (line: 6, col: 12): Structure name must start with s_ - EUNION_TYPE_NAMING (line: 7, col: 11): Union name must start with u_ - EUNION_TYPE_NAMING (line: 8, col: 19): Union name must start with u_ - EUSER_DEFINED_TYPEDEF (line: 8, col: 24): User defined typedef must start with t_ - EENUM_TYPE_NAMING (line: 9, col: 10): Enum name must start with e_ - EUSER_DEFINED_TYPEDEF (line: 10, col: 5): User defined typedef must start with t_ \ No newline at end of file diff --git a/norminette/tests/lexer/__init__.py b/norminette/tests/lexer/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/norminette/tests/lexer/errors/__init__.py b/norminette/tests/lexer/errors/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/norminette/tests/lexer/errors/dict.py b/norminette/tests/lexer/errors/dict.py deleted file mode 100644 index 96403b75..00000000 --- a/norminette/tests/lexer/errors/dict.py +++ /dev/null @@ -1,34 +0,0 @@ -failed_tokens_tests = { - # String to test as key: error position as value - "\tdouble f=45e++ai": [1, 14], - "\tchar *b = \"e42\n\n": [1, 15], - "int\t\t\tn\t= 0x1uLl;": [1, 19], - "char\t\t\t*yo\t\t\t= \"": [1, 31], - "{return 1;}\\\\\\n": [1, 12], - "int a = a+++++a;\ndouble b = .0e4x;": [2, 12], - "int a = 1;\nint b = 10ul;\nint c = 10lul;\n": [3, 9], - "int number = 0x1uLl;": [1, 14], - "int number = 0x1ULl;": [1, 14], - "int number = 0x1lL;": [1, 14], - "int number = 0x1Ll;": [1, 14], - "int number = 0x1UlL;": [1, 14], - "int number = 10ullll": [1, 14], - "int number = 10lul": [1, 14], - "int number = 10lUl": [1, 14], - "int number = 10LUl": [1, 14], - "int number = 10uu": [1, 14], - "int number = 10Uu": [1, 14], - "int number = 10UU": [1, 14], - "int number = 0b0101e": [1, 14], - "int number = 0b0101f": [1, 14], - "int number = 0b0X101f": [1, 14], - "int number = 0X101Uf": [1, 14], - "int number = 0101f": [1, 14], - "float number=10.12fe10": [1, 14], - "float number=10.fU": [1, 14], - "float number=21.3E56E4654": [1, 14], - "float number=105e4d": [1, 14], - "float number=105flu": [1, 14], - "float number=105fu": [1, 14], - "float number=105eu": [1, 14] -} diff --git a/norminette/tests/lexer/errors/tester.py b/norminette/tests/lexer/errors/tester.py deleted file mode 100644 index d44c556a..00000000 --- a/norminette/tests/lexer/errors/tester.py +++ /dev/null @@ -1,63 +0,0 @@ - -import sys -import glob -import difflib -from lexer import Lexer -from lexer import TokenError -from tests.lexer.errors.dict import failed_tokens_tests as test_dict - - -def read_file(filename): - with open(filename) as f: - return f.read() - - -class norminetteTester(): - - def __init__(self): - self.__tests = 0 - self.__failed = 0 - self.__success = 0 - self.result = [] - - def assertRaises(self, test, ref, 
test_line): - try: - diff = "".join(test()) - self.__failed += 1 - print(test_line + "KO") - print(diff, end="") - self.result.append("✗ ") - except TokenError as e: - if e.msg == ref: - self.__success += 1 - self.result.append("✓ ") - else: - self.__failed += 1 - print(test_line + "KO") - diff = difflib.ndiff(e.msg.splitlines(), - ref.splitlines()) - diff = list(diff) - self.result.append("✗ ") - print(''.join(diff)) - - def main(self): - print("\n\nTesting error cases:\n") - i = 1 - for key, val in test_dict.items(): - self.__tests += 1 - ref_output = f"Unrecognized token line {val[0]}, col {val[1]}" - func = Lexer(key).check_tokens - self.assertRaises(func, ref_output, f"Test {i}: " + repr(str(key))) - i += 1 - - print("----------------------------------") - print(f"Total {self.__tests}") - print("".join(self.result)) - print(f"Success {self.__success}, Failed {self.__failed}: ", end="") - print("✅ OK!" if self.__failed == 0 else "❌ KO!") - - sys.exit(0 if self.__failed == 0 else 1) - - -if __name__ == '__main__': - norminetteTester().main() diff --git a/norminette/tests/lexer/files/__init__.py b/norminette/tests/lexer/files/__init__.py deleted file mode 100644 index 131337dc..00000000 --- a/norminette/tests/lexer/files/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Testing modules diff --git a/norminette/tests/lexer/files/file_token_test.py b/norminette/tests/lexer/files/file_token_test.py deleted file mode 100644 index 49482a9c..00000000 --- a/norminette/tests/lexer/files/file_token_test.py +++ /dev/null @@ -1,84 +0,0 @@ -import sys -import glob -import difflib -from lexer import Lexer -from lexer import TokenError - - -def read_file(filename): - with open(filename) as f: - return f.read() - - -class norminetteFileTester(): - - def __init__(self): - self.__tests = 0 - self.__failed = 0 - self.__success = 0 - self.result = [] - - def assertEqual(self, first, second): - if first == second: - self.__success += 1 - print("OK") - self.result.append("✓ ") - else: - print("KO") - self.__failed += 1 - diff = difflib.ndiff(first.splitlines(keepends=True), - second.splitlines(keepends=True)) - diff = list(diff) - self.result.append("✗ ") - print(''.join(diff)) - - def assertRaises(self, test, ref): - try: - diff = "".join(test()) - self.__failed += 1 - print("KO") - print(diff, end="") - self.result.append("✗ ") - except TokenError as e: - if e.msg == ref: - self.__success += 1 - print(f"OK") - self.result.append("✓ ") - else: - self.__failed += 1 - print("KO") - diff = difflib.ndiff(e.msg.splitlines(), - ref.splitlines()) - diff = list(diff) - self.result.append("✗ ") - print(''.join(diff)) - - def test_files(self): - files = glob.glob("tests/lexer/files/*.c") - files.sort() - for f in files: - self.__tests += 1 - print(f.split('/')[-1], end=": ") - - try: - output = Lexer(read_file(f)).check_tokens() - except TokenError as t: - self.__failed += 1 - print("KO") - print(t) - self.result.append("✗ ") - continue - reference_output = read_file(f.split(".")[0] + ".tokens") - self.assertEqual(output, reference_output) - - print("----------------------------------") - print(f"Total {self.__tests}") - print("".join(self.result)) - print(f"Success {self.__success}, Failed {self.__failed}: ", end="") - print("✅ OK!" 
if self.__failed == 0 else "❌ KO!") - - sys.exit(0 if self.__failed == 0 else 1) - - -if __name__ == '__main__': - norminetteFileTester().test_files() diff --git a/norminette/tests/lexer/files/ok_test_05.tokens b/norminette/tests/lexer/files/ok_test_05.tokens deleted file mode 100644 index e4f26f39..00000000 --- a/norminette/tests/lexer/files/ok_test_05.tokens +++ /dev/null @@ -1,13 +0,0 @@ -> - - - - - - - - - diff --git a/norminette/tests/lexer/files/ok_test_21.tokens b/norminette/tests/lexer/files/ok_test_21.tokens deleted file mode 100644 index 8643cfdd..00000000 --- a/norminette/tests/lexer/files/ok_test_21.tokens +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/norminette/tests/lexer/unit-tests/brackets_tokens_test.py b/norminette/tests/lexer/unit-tests/brackets_tokens_test.py deleted file mode 100644 index 5d764768..00000000 --- a/norminette/tests/lexer/unit-tests/brackets_tokens_test.py +++ /dev/null @@ -1,34 +0,0 @@ -import unittest -import sys -from lexer.lexer import Lexer - - -class BracketsTokensTest(unittest.TestCase): - - def test_opening_bracket(self): - self.assertEqual( - Lexer("{").get_next_token().type, - "LBRACE") - - def test_closing_bracket(self): - self.assertEqual(Lexer("}").get_next_token().type, "RBRACE") - - def test_opening_parenthesis(self): - self.assertEqual(Lexer("(").get_next_token().type, "LPARENTHESIS") - - def test_closing_parenthesis(self): - self.assertEqual(Lexer(")").get_next_token().type, "RPARENTHESIS") - - def test_opening_square_bracket(self): - self.assertEqual( - Lexer("[").get_next_token().type, - "LBRACKET") - - def test_closing_square_bracket(self): - self.assertEqual( - Lexer("]").get_next_token().type, - "RBRACKET") - - -if __name__ == '__main__': - unittest.main() diff --git a/norminette/tests/lexer/unit-tests/char_constant_tokens_test.py b/norminette/tests/lexer/unit-tests/char_constant_tokens_test.py deleted file mode 100644 index c217a01e..00000000 --- a/norminette/tests/lexer/unit-tests/char_constant_tokens_test.py +++ /dev/null @@ -1,50 +0,0 @@ -import unittest -import sys -from lexer.lexer import Lexer, TokenError - - -class CharConstTokenTest(unittest.TestCase): - def assertRaises(self, test): - try: - test() - return False - except TokenError: - return True - - def test_basic_char(self): - self.assertEqual( - Lexer("'*'").get_next_token().test(), - "") - - def test_escaped_newline(self): - self.assertEqual( - Lexer("'\\n'").get_next_token().test(), - "") - - def test_octal_char(self): - self.assertEqual( - Lexer("'\\042'").get_next_token().test(), - "") - - def test_hex_char(self): - self.assertEqual( - Lexer("'0x042'").get_next_token().test(), - "") - - def test_hex_char(self): - self.assertEqual( - Lexer("'0x042'").get_next_token().test(), - "") - - def test_error_newline_in_const(self): - self.assertRaises(Lexer("'\n1'").get_next_token) - - def test_error_escaped_newline_followed_by_newline(self): - self.assertRaises(Lexer("'\\n\n'").get_next_token) - - def test_error_unclosed_quote(self): - self.assertRaises(Lexer("'A").get_next_token) - - -if __name__ == '__main__': - unittest.main() diff --git a/norminette/tests/lexer/unit-tests/constant_tokens_test.py b/norminette/tests/lexer/unit-tests/constant_tokens_test.py deleted file mode 100644 index 3b85380c..00000000 --- a/norminette/tests/lexer/unit-tests/constant_tokens_test.py +++ /dev/null @@ -1,129 +0,0 @@ -import unittest -import sys -from lexer.lexer import Lexer, TokenError - - -class ConstantTokensTest(unittest.TestCase): - - def assertRaises(self, test): - try: - 
test() - return False - except TokenError: - return True - - def test_basic_constant(self): - self.assertEqual(Lexer("42").check_tokens(), "\n") - - def test_plus_sign_constant(self): - self.assertEqual( - Lexer("+42").check_tokens(), - "\n") - - def test_minus_sign_constant(self): - self.assertEqual( - Lexer("-42").check_tokens(), - "\n") - - def test_many_signs_constant(self): - self.assertEqual( - Lexer("+-42").check_tokens(), - "\n") - - def test_decimal_constant(self): - self.assertEqual( - Lexer("4.2").check_tokens(), - "\n") - - def test_decimal_constant_starting_with_dot(self): - self.assertEqual( - Lexer(".42").check_tokens(), - "\n") - - def test_exponential_constant(self): - self.assertEqual( - Lexer("4e2").check_tokens(), - "\n") - - def test_exponential_constant_starting_with_dot(self): - self.assertEqual( - Lexer(".4e2").check_tokens(), - "\n") - - def test_octal_constant(self): - self.assertEqual( - Lexer("042").check_tokens(), - "\n") - - def test_hex_constant(self): - self.assertEqual( - Lexer("0x42").check_tokens(), - "\n") - - def test_hex_with_sign_constant(self): - self.assertEqual( - Lexer("-0x4e2").check_tokens(), - "\n") - - def test_hex_with_many_signs_constant(self): - self.assertEqual( - Lexer("-+-+-+-+-+-+-+-0Xe4Ae2").check_tokens(), - "" - + "" - + "" - + "\n") - - def test_long_constant(self): - self.assertEqual( - Lexer("42l").check_tokens(), - "\n") - - def test_unsigned_long_constant(self): - self.assertEqual( - Lexer("42ul").check_tokens(), - "\n") - - def test_long_long_constant(self): - self.assertEqual( - Lexer("42ll").check_tokens(), - "\n") - - def test_unsigned_long_long_constant(self): - self.assertEqual( - Lexer("42ull").check_tokens(), - "\n") - - def test_unsigned_constant(self): - self.assertEqual( - Lexer("42u").check_tokens(), - "\n") - - def test_error_too_many_dots(self): - self.assertRaises(Lexer("4.4.4").check_tokens) - - def test_error_too_many_e(self): - self.assertRaises(Lexer("4e4e4").check_tokens) - - def test_error_too_many_x(self): - self.assertRaises(Lexer("4x4x4").check_tokens) - - def test_error_too_many_u(self): - self.assertRaises(Lexer("42uul").check_tokens) - - def test_error_too_many_l(self): - self.assertRaises(Lexer("42Lllu").check_tokens) - - def test_error_misplaced_l(self): - self.assertRaises(Lexer("42lul").check_tokens) - - def test_misplaced_e(self): - self.assertEqual( - Lexer(".e42").check_tokens(), - "\n") - - def test_another_misplaced_e(self): - self.assertRaises(Lexer(".42e").check_tokens) - - -if __name__ == '__main__': - unittest.main() diff --git a/norminette/tests/lexer/unit-tests/identifiers_tokens_test.py b/norminette/tests/lexer/unit-tests/identifiers_tokens_test.py deleted file mode 100644 index 005b11a5..00000000 --- a/norminette/tests/lexer/unit-tests/identifiers_tokens_test.py +++ /dev/null @@ -1,45 +0,0 @@ -import unittest -import sys -from lexer.lexer import Lexer - - -def eat_tokens(line): - lex = Lexer(line) - line = "" - while lex.get_next_token(): - line += lex.peek_token().test() - if lex.peek_token().type in ["EOF", "ERROR"]: - break - return line - - -class IdentifiersTokensTest(unittest.TestCase): - - def test_simple_identifier(self): - self.assertEqual(eat_tokens("foo"), "") - - def test_underscore_identifier(self): - self.assertEqual(eat_tokens("_foo"), "") - - def test_underscore_with_number_identifier(self): - self.assertEqual(eat_tokens("_foo42"), "") - - def test_double_underscore_with_number_identifier(self): - self.assertEqual(eat_tokens("_foo__42"), "") - - def 
test_underscore_and_uppercase_identifier(self): - self.assertEqual(eat_tokens("_FOO"), "") - - def test_underscore_at_the_end_and_uppercase_identifier(self): - self.assertEqual(eat_tokens("FOO_"), "") - - def test_identifier_can_not_start_with_a_number(self): - self.assertNotEqual(eat_tokens("5_FOO_"), "") - - def test_identifier_can_not_have_a_space(self): - self.assertNotEqual(eat_tokens("foo 1"), "") diff --git a/norminette/tests/lexer/unit-tests/keywords_tokens_test.py b/norminette/tests/lexer/unit-tests/keywords_tokens_test.py deleted file mode 100644 index f4a88e41..00000000 --- a/norminette/tests/lexer/unit-tests/keywords_tokens_test.py +++ /dev/null @@ -1,132 +0,0 @@ -import unittest -import sys -from lexer.lexer import Lexer - - -def eat_tokens(line): - lex = Lexer(line) - line = "" - while lex.get_next_token(): - line += lex.peek_token().test() - return line - - -class TokensKeywordsTest(unittest.TestCase): - - def test_auto_keyword(self): - self.assertEqual(eat_tokens("auto"), "") - - def test_break_keyword(self): - self.assertEqual(eat_tokens("break"), "") - - def test_case_keyword(self): - self.assertEqual(eat_tokens("case"), "") - - def test_char_keyword(self): - self.assertEqual(eat_tokens("char"), "") - - def test_const_keyword(self): - self.assertEqual(eat_tokens("const"), "") - - def test_continue_keyword(self): - self.assertEqual(eat_tokens("continue"), "") - - def test_default_keyword(self): - self.assertEqual(eat_tokens("default"), "") - - def test_do_keyword(self): - self.assertEqual(eat_tokens("do"), "") - - def test_double_keyword(self): - self.assertEqual(eat_tokens("double"), "") - - def test_else_keyword(self): - self.assertEqual(eat_tokens("else"), "") - - def test_enum_keyword(self): - self.assertEqual(eat_tokens("enum"), "") - - def test_extern_keyword(self): - self.assertEqual(eat_tokens("extern"), "") - - def test_float_keyword(self): - self.assertEqual(eat_tokens("float"), "") - - def test_for_keyword(self): - self.assertEqual(eat_tokens("for"), "") - - def test_goto_keyword(self): - self.assertEqual(eat_tokens("goto"), "") - - def test_if_keyword(self): - self.assertEqual(eat_tokens("if"), "") - - def test_int_keyword(self): - self.assertEqual(eat_tokens("int"), "") - - def test_long_keyword(self): - self.assertEqual(eat_tokens("long"), "") - - def test_register_keyword(self): - self.assertEqual(eat_tokens("register"), "") - - def test_return_keyword(self): - self.assertEqual(eat_tokens("return"), "") - - def test_signed_keyword(self): - self.assertEqual(eat_tokens("signed"), "") - - def test_sizeof_keyword(self): - self.assertEqual(eat_tokens("sizeof"), "") - - def test_static_keyword(self): - self.assertEqual(eat_tokens("static"), "") - - def test_struct_keyword(self): - self.assertEqual(eat_tokens("struct"), "") - - def test_switch_keyword(self): - self.assertEqual(eat_tokens("switch"), "") - - def test_typedef_keyword(self): - self.assertEqual(eat_tokens("typedef"), "") - - def test_union_keyword(self): - self.assertEqual(eat_tokens("union"), "") - - def test_unsigned_keyword(self): - self.assertEqual(eat_tokens("unsigned"), "") - - def test_void_keyword(self): - self.assertEqual(eat_tokens("void"), "") - - def test_volatile_keyword(self): - self.assertEqual(eat_tokens("volatile"), "") - - def test_while_keyword(self): - self.assertEqual(eat_tokens("while"), "") - - def test_define_keyword(self): - self.assertEqual(eat_tokens("#define"), "") - - def test_error_keyword(self): - self.assertEqual(eat_tokens("#error"), "") - - def 
test_ifndef_keyword(self): - self.assertEqual(eat_tokens("#ifndef"), "") - - def test_ifdef_keyword(self): - self.assertEqual(eat_tokens("#ifdef"), "") - - def test_include_keyword(self): - self.assertEqual(eat_tokens("#include"), "") - - def test_pragma_keyword(self): - self.assertEqual(eat_tokens("#pragma"), "") - - def test_undef_keyword(self): - self.assertEqual(eat_tokens("#undef"), "") - - -if __name__ == '__main__': - unittest.main() diff --git a/norminette/tests/lexer/unit-tests/operators_tokens_test.py b/norminette/tests/lexer/unit-tests/operators_tokens_test.py deleted file mode 100644 index 6b23c0e8..00000000 --- a/norminette/tests/lexer/unit-tests/operators_tokens_test.py +++ /dev/null @@ -1,129 +0,0 @@ -import unittest -import sys -from lexer.lexer import Lexer - - -class TokensOperatorsTest(unittest.TestCase): - - def test_op_right_assign(self): - self.assertEqual(Lexer(">>=").get_next_token().type, "RIGHT_ASSIGN") - - def test_op_left_assign(self): - self.assertEqual(Lexer("<<=").get_next_token().type, "LEFT_ASSIGN") - - def test_op_add_assign(self): - self.assertEqual(Lexer("+=").get_next_token().type, "ADD_ASSIGN") - - def test_op_sub_assign(self): - self.assertEqual(Lexer("-=").get_next_token().type, "SUB_ASSIGN") - - def test_op_mul_assign(self): - self.assertEqual(Lexer("*=").get_next_token().type, "MUL_ASSIGN") - - def test_op_div_assign(self): - self.assertEqual(Lexer("/=").get_next_token().type, "DIV_ASSIGN") - - def test_op_mod_assign(self): - self.assertEqual(Lexer("%=").get_next_token().type, "MOD_ASSIGN") - - def test_op_and_assign(self): - self.assertEqual(Lexer("&=").get_next_token().type, "AND_ASSIGN") - - def test_op_xor_assign(self): - self.assertEqual(Lexer("^=").get_next_token().type, "XOR_ASSIGN") - - def test_op_or_assign(self): - self.assertEqual(Lexer("|=").get_next_token().type, "OR_ASSIGN") - - def test_op_le_assign(self): - self.assertEqual(Lexer("<=").get_next_token().type, "LESS_OR_EQUAL") - - def test_op_ge_assign(self): - self.assertEqual(Lexer(">=").get_next_token().type, "GREATER_OR_EQUAL") - - def test_op_eq_assign(self): - self.assertEqual(Lexer("==").get_next_token().type, "EQUALS") - - def test_op_ne_assign(self): - self.assertEqual(Lexer("!=").get_next_token().type, "NOT_EQUAL") - - def test_op_assign(self): - self.assertEqual(Lexer("=").get_next_token().type, "ASSIGN") - - def test_op_semi_colon(self): - self.assertEqual(Lexer(";").get_next_token().type, "SEMI_COLON") - - def test_op_colon(self): - self.assertEqual(Lexer(":").get_next_token().type, "COLON") - - def test_op_comma(self): - self.assertEqual(Lexer(",").get_next_token().type, "COMMA") - - def test_op_dot(self): - self.assertEqual(Lexer(".").get_next_token().type, "DOT") - - def test_op_not(self): - self.assertEqual(Lexer("!").get_next_token().type, "NOT") - - def test_op_minus(self): - self.assertEqual(Lexer("-").get_next_token().type, "MINUS") - - def test_op_plus(self): - self.assertEqual(Lexer("+").get_next_token().type, "PLUS") - - def test_op_mult(self): - self.assertEqual(Lexer("*").get_next_token().type, "MULT") - - def test_op_div(self): - self.assertEqual(Lexer("/").get_next_token().type, "DIV") - - def test_op_modulo(self): - self.assertEqual(Lexer("%").get_next_token().type, "MODULO") - - def test_op_less_than(self): - self.assertEqual(Lexer("<").get_next_token().type, "LESS_THAN") - - def test_op_more_than(self): - self.assertEqual(Lexer(">").get_next_token().type, "MORE_THAN") - - def test_op_ellipsis(self): - self.assertEqual(Lexer("...").get_next_token().type, 
"ELLIPSIS") - - def test_op_inc(self): - self.assertEqual(Lexer("++").get_next_token().type, "INC") - - def test_op_dec(self): - self.assertEqual(Lexer("--").get_next_token().type, "DEC") - - def test_op_ptr(self): - self.assertEqual(Lexer("->").get_next_token().type, "PTR") - - def test_op_and(self): - self.assertEqual(Lexer("&&").get_next_token().type, "AND") - - def test_op_or(self): - self.assertEqual(Lexer("||").get_next_token().type, "OR") - - def test_op_bwise_xor(self): - self.assertEqual(Lexer("^").get_next_token().type, "BWISE_XOR") - - def test_op_bwise_or(self): - self.assertEqual(Lexer("|").get_next_token().type, "BWISE_OR") - - def test_op_bwise_not(self): - self.assertEqual(Lexer("~").get_next_token().type, "BWISE_NOT") - - def test_op_bwise_and(self): - self.assertEqual(Lexer("&").get_next_token().type, "BWISE_AND") - - def test_op_right_shift(self): - self.assertEqual(Lexer(">>").get_next_token().type, "RIGHT_SHIFT") - - def test_op_left_shift(self): - self.assertEqual(Lexer("<<").get_next_token().type, "LEFT_SHIFT") - - def test_op_tern_condition(self): - self.assertEqual(Lexer("?").get_next_token().type, "TERN_CONDITION") - - if __name__ == '__main__': - unittest.main() diff --git a/norminette/tests/lexer/unit-tests/string_tokens_test.py b/norminette/tests/lexer/unit-tests/string_tokens_test.py deleted file mode 100644 index d2bcebd3..00000000 --- a/norminette/tests/lexer/unit-tests/string_tokens_test.py +++ /dev/null @@ -1,31 +0,0 @@ -import unittest -import sys -from lexer.lexer import Lexer - - -class StringTokenTest(unittest.TestCase): - - def test_basic_string(self): - self.assertEqual( - Lexer('"Basic string"').get_next_token().test(), - '') - - def test_basic_L_string(self): - self.assertEqual( - Lexer('L"Basic string"').get_next_token().test(), - '') - - def test_basic_escaped_string(self): - self.assertEqual( - Lexer('"Basic \\"string\\""').get_next_token().test(), - '') - - def test_escaped_string(self): - self.assertEqual( - Lexer('"Escaped \\\\\\"string\\\\\\\\\\\"\\\\"').get_next_token() - .test(), - '') - - -if __name__ == '__main__': - unittest.main() diff --git a/norminette/tests/loop.zsh b/norminette/tests/loop.zsh deleted file mode 100755 index 8c32e9ff..00000000 --- a/norminette/tests/loop.zsh +++ /dev/null @@ -1,2 +0,0 @@ -for ((i = 0; i < 100; i++)); do -python . 
**/*.c; done diff --git a/norminette/tests/rules/integer_constants.out b/norminette/tests/rules/integer_constants.out deleted file mode 100644 index c1dfcdc8..00000000 --- a/norminette/tests/rules/integer_constants.out +++ /dev/null @@ -1,79 +0,0 @@ -integer_constants.c - IsFuncDeclaration In "GlobalScope" from "None" line 1": - -integer_constants.c - IsBlockStart In "Function" from "GlobalScope" line 2": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 3": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 4": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 5": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 6": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 7": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 8": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 9": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 10": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 11": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 12": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 13": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 14": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 15": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 16": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 17": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 18": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 19": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 20": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 21": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 22": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 23": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 24": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 25": - -integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 26": - -integer_constants.c - IsBlockEnd In "Function" from "GlobalScope" line 27": - -integer_constants.c: KO! 
- DECL_ASSIGN_LINE (line: 3, col: 33): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 4, col: 34): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 5, col: 34): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 6, col: 35): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 7, col: 35): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 8, col: 36): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 9, col: 33): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 10, col: 34): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 11, col: 33): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 12, col: 34): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 13, col: 34): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 14, col: 35): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 15, col: 35): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 16, col: 36): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 17, col: 33): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 18, col: 34): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 19, col: 33): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 20, col: 34): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 21, col: 34): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 22, col: 35): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 23, col: 35): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 24, col: 36): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 25, col: 33): Declaration and assignation on a single line - DECL_ASSIGN_LINE (line: 26, col: 34): Declaration and assignation on a single line diff --git a/norminette/tests/rules/ko_include.out b/norminette/tests/rules/ko_include.out deleted file mode 100644 index 080fa854..00000000 --- a/norminette/tests/rules/ko_include.out +++ /dev/null @@ -1,10 +0,0 @@ -ko_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": - > -ko_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": - -ko_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 3": - > -ko_include.c: KO! - INCLUDE_HEADER_ONLY (line: 1, col: 1): .c file includes are forbidden - MACRO_NAME_CAPITAL (line: 2, col: 3): Macro name must be capitalized - INCLUDE_START_FILE (line: 3, col: 1): Include must be at the start of file diff --git a/norminette/tests/rules/ko_preproc_define.out b/norminette/tests/rules/ko_preproc_define.out deleted file mode 100644 index 5e9a34e9..00000000 --- a/norminette/tests/rules/ko_preproc_define.out +++ /dev/null @@ -1,26 +0,0 @@ -ko_preproc_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": - -ko_preproc_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": - -ko_preproc_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 3": - -ko_preproc_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 4": - -ko_preproc_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 5": - -ko_preproc_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 6": - -ko_preproc_define.c: KO! 
- MACRO_NAME_CAPITAL (line: 1, col: 2): Macro name must be capitalized - PREPROC_CONSTANT (line: 1, col: 7): Preprocessor statement must only contain constant defines - PREPROC_CONSTANT (line: 1, col: 8): Preprocessor statement must only contain constant defines - TOO_MANY_VALS (line: 1, col: 9): Too many values on define - TOO_MANY_VALS (line: 2, col: 8): Too many values on define - PREPROC_CONSTANT (line: 3, col: 11): Preprocessor statement must only contain constant defines - PREPROC_CONSTANT (line: 3, col: 17): Preprocessor statement must only contain constant defines - PREPROC_CONSTANT (line: 4, col: 12): Preprocessor statement must only contain constant defines - TOO_MANY_VALS (line: 5, col: 16): Too many values on define - PREPROC_START_LINE (line: 6, col: 1): Preprocessor statement not at the beginning of the line - SPACE_REPLACE_TAB (line: 6, col: 2): Found space when expecting tab - MACRO_NAME_CAPITAL (line: 6, col: 7): Macro name must be capitalized - PREPROC_CONSTANT (line: 6, col: 12): Preprocessor statement must only contain constant defines diff --git a/norminette/tests/rules/ko_preproc_indent.out b/norminette/tests/rules/ko_preproc_indent.out deleted file mode 100644 index cd2952df..00000000 --- a/norminette/tests/rules/ko_preproc_indent.out +++ /dev/null @@ -1,15 +0,0 @@ -ko_preproc_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": - -ko_preproc_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": - -ko_preproc_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 3": - -ko_preproc_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 4": - -ko_preproc_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 5": - -ko_preproc_indent.c: KO! - PREPROC_BAD_INDENT (line: 1, col: 1): Bad preprocessor indentation - PREPROC_BAD_INDENT (line: 3, col: 1): Bad preprocessor indentation - PREPROC_BAD_INDENT (line: 4, col: 1): Bad preprocessor indentation - PREPROC_BAD_INDENT (line: 5, col: 1): Bad preprocessor indentation diff --git a/norminette/tests/rules/ko_var_name.out b/norminette/tests/rules/ko_var_name.out deleted file mode 100644 index 8ce9be4d..00000000 --- a/norminette/tests/rules/ko_var_name.out +++ /dev/null @@ -1,9 +0,0 @@ -ko_var_name.c - IsVarDeclaration In "GlobalScope" from "None" line 1": - -ko_var_name.c - IsVarDeclaration In "GlobalScope" from "None" line 2": - -ko_var_name.c: KO! 
- SPACE_REPLACE_TAB (line: 1, col: 4): Found space when expecting tab - GLOBAL_VAR_NAMING (line: 1, col: 5): Global variable must start with g_ - SPACE_REPLACE_TAB (line: 2, col: 4): Found space when expecting tab - GLOBAL_VAR_NAMING (line: 2, col: 5): Global variable must start with g_ diff --git a/norminette/tests/rules/ok_func_name.c b/norminette/tests/rules/ok_func_name.c deleted file mode 100644 index ebaaea7c..00000000 --- a/norminette/tests/rules/ok_func_name.c +++ /dev/null @@ -1,6 +0,0 @@ -int func(void); - -int func2(void) -{ - return (1); -} diff --git a/norminette/tests/rules/ok_func_name.out b/norminette/tests/rules/ok_func_name.out deleted file mode 100644 index c2da99b6..00000000 --- a/norminette/tests/rules/ok_func_name.out +++ /dev/null @@ -1,13 +0,0 @@ -ok_func_name.c - IsFuncPrototype In "GlobalScope" from "None" line 1": - -ok_func_name.c - IsEmptyLine In "GlobalScope" from "None" line 2": - -ok_func_name.c - IsFuncDeclaration In "GlobalScope" from "None" line 3": - -ok_func_name.c - IsBlockStart In "Function" from "GlobalScope" line 4": - -ok_func_name.c - IsExpressionStatement In "Function" from "GlobalScope" line 5": - -ok_func_name.c - IsBlockEnd In "Function" from "GlobalScope" line 6": - -ok_func_name.c: OK! diff --git a/norminette/tests/rules/ok_include.out b/norminette/tests/rules/ok_include.out deleted file mode 100644 index c4342586..00000000 --- a/norminette/tests/rules/ok_include.out +++ /dev/null @@ -1,9 +0,0 @@ -ok_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": - > -ok_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": - -ok_include.c - IsEmptyLine In "GlobalScope" from "None" line 3": - -ok_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 4": - > -ok_include.c: OK! diff --git a/norminette/tests/rules/ok_preproc_define.out b/norminette/tests/rules/ok_preproc_define.out deleted file mode 100644 index 2a071c04..00000000 --- a/norminette/tests/rules/ok_preproc_define.out +++ /dev/null @@ -1,7 +0,0 @@ -ok_preproc_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": - -ok_preproc_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": - -ok_preproc_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 3": - -ok_preproc_define.c: OK! 
diff --git a/norminette/tests/rules/rule_tester.py b/norminette/tests/rules/rule_tester.py deleted file mode 100644 index 1283def1..00000000 --- a/norminette/tests/rules/rule_tester.py +++ /dev/null @@ -1,74 +0,0 @@ -import unittest -import glob -import difflib -import sys -from lexer import Lexer -from registry import Registry -from context import Context -from io import StringIO - - -registry = Registry() - - -def read_file(filename): - with open(filename) as f: - return f.read() - - -class norminetteRuleTester(): - def __init__(self): - self.__tests = 0 - self.__failed = 0 - self.__success = 0 - self.result = [] - - def assertEqual(self, test, ref): - if test == ref: - self.__success += 1 - print("OK") - self.result.append("✓ ") - else: - self.__failed += 1 - print("KO") - diff = difflib.ndiff(test.splitlines(keepends=True), - ref.splitlines(keepends=True)) - diff = list(diff) - self.result.append("✗ ") - print(''.join(diff)) - - def test_file(self, filename): - stdout = sys.stdout - sys.stdout = buff = StringIO() - lexer = Lexer(read_file(filename)) - context = Context(filename, lexer.get_tokens(), debug=2) - registry.run(context, read_file(filename)) - reference_output = read_file(filename.split(".")[0] + ".out") - sys.stdout = stdout - self.assertEqual(buff.getvalue(), reference_output) - - def run_tests(self): - files = glob.glob("tests/rules/*.[ch]") - files.sort() - for f in files: - self.__tests += 1 - print("TESTER -", f.split('/')[-1], end=": ") - try: - self.test_file(f) - except Exception as e: - self.__failed += 1 - print("KO") - print(e) - self.result.append("✗ ") - continue - print("----------------------------------") - print(f"Total {self.__tests}") - print("".join(self.result)) - print(f"Success {self.__success}, Failed {self.__failed}: ", end="") - print("✅ OK!" if self.__failed == 0 else "❌ KO!") - - sys.exit(0 if self.__failed == 0 else 1) - - -if __name__ == '__main__': - norminetteRuleTester().run_tests() diff --git a/norminette/tests/rules/test_file_1012_4.out b/norminette/tests/rules/test_file_1012_4.out deleted file mode 100644 index 43409162..00000000 --- a/norminette/tests/rules/test_file_1012_4.out +++ /dev/null @@ -1,5 +0,0 @@ -test_file_1012_4.c - IsVarDeclaration In "GlobalScope" from "None" line 1": - -test_file_1012_4.c - IsVarDeclaration In "GlobalScope" from "None" line 2": - -test_file_1012_4.c: OK! diff --git a/norminette/tests/rules/testfile_210104_4.out b/norminette/tests/rules/testfile_210104_4.out deleted file mode 100644 index 471f6add..00000000 --- a/norminette/tests/rules/testfile_210104_4.out +++ /dev/null @@ -1,3 +0,0 @@ -testfile_210104_4.c - IsVarDeclaration In "GlobalScope" from "None" line 1": - -testfile_210104_4.c: OK! 
diff --git a/norminette/tests/time.zsh b/norminette/tests/time.zsh deleted file mode 100755 index de82253a..00000000 --- a/norminette/tests/time.zsh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/zsh -time zsh tests/loop.zsh -echo "ran on $(echo $(ls **/*.c | wc -l) "* 100" | bc) files" diff --git a/norminette/tools/colors.py b/norminette/tools/colors.py index 9a3e11bc..e6a0da9c 100644 --- a/norminette/tools/colors.py +++ b/norminette/tools/colors.py @@ -29,12 +29,12 @@ def colors(text, *argv): "light_magenta": 95, "light_cyan": 96, "white": 97, - "reset_all": 0 + "reset_all": 0, } reset = "\u001b[0m" tmp = [] for arg in argv: tmp.append(str(options.get(arg, 0))) - sep = ';' + sep = ";" res = f"\u001b[{sep.join(tmp)}m{text}{reset}" - return (res) + return res diff --git a/norminette/version.py b/norminette/version.py deleted file mode 100644 index 48fb20af..00000000 --- a/norminette/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "3.1.3" \ No newline at end of file diff --git a/pdf/Makefile b/pdf/Makefile deleted file mode 100644 index 2990ae25..00000000 --- a/pdf/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -# List the pdf's to build. foo.tex will produce foo.pdf -TARGETS = en.norm.pdf - -# List the files included in the slides -# DEPS = somePicture.png someSound.flac someOtherPicture.png -DEPS = - -# Relative path to the LaTeX documentclass setup files -# Adapt as needed -RELPATH = $(shell git rev-parse --show-toplevel)/resources/latex - -# You should not touch this either -include $(RELPATH)/Makefile.LaTeX diff --git a/pdf/README b/pdf/README new file mode 100644 index 00000000..67598e40 --- /dev/null +++ b/pdf/README @@ -0,0 +1,7 @@ + +The Norm pdf + +- As a student, you are welcome to do merge requests on .tex files +- The english version is always the most recent. If you want to suggest an update on an older version of another language, we advice to wait for the latest version for your language. + +- As a staff member, please update first 42born2git, which contain the main source of the Norm. diff --git a/pdf/en.norm.pdf b/pdf/en.norm.pdf index 92831541..533377f2 100644 Binary files a/pdf/en.norm.pdf and b/pdf/en.norm.pdf differ diff --git a/pdf/en.norm.pdf.version b/pdf/en.norm.pdf.version new file mode 100644 index 00000000..8a36cd14 --- /dev/null +++ b/pdf/en.norm.pdf.version @@ -0,0 +1 @@ +4.1 \ No newline at end of file diff --git a/pdf/en.norm.tex b/pdf/en.norm.tex index 3ba7a01e..ccc417b0 100644 --- a/pdf/en.norm.tex +++ b/pdf/en.norm.tex @@ -1,5 +1,4 @@ \documentclass{42-en} -\newcommand\qdsh{\texttt{42sh}} @@ -11,13 +10,13 @@ \begin{document} \title{The Norm} -\subtitle{Version 3} +\subtitle{Version 4.1} \summary { - This document describes the applicable standard (Norm) at 42. A - programming standard defines a set of rules to follow when writing code. - The Norm applies to all C projects within the Inner Circle by default, and + This document describes the applicable standard (Norm) at 42: a + programming standard that defines a set of rules to follow when writing code. + The Norm applies to all C projects within the Common Core by default, and to any project where it's specified. } @@ -29,27 +28,91 @@ %******************************************************************************% % % -% Avant-propos % +% Foreword % % % %******************************************************************************% \chapter{Foreword} - The Norm is in python and open source. 
\\
-    Its repository is available at https://github.com/42School/norminette.\\
-    Pull requests, suggestions and issues are welcome!
+The \texttt{norminette} is a Python, open source program that checks your source
+code for Norm compliance. It checks many constraints of the Norm, but not
+all of them (e.g. subjective constraints). Unless specific local regulations apply on
+your campus, the \texttt{norminette} prevails during evaluations for the items it
+controls. In the following pages, rules that are not checked by the \texttt{norminette}
+are marked with \textit{(*)}; they can lead to project failure (using the Norm flag) if
+the evaluator discovers a violation during a code review.\\
+
+Its repository is available at https://github.com/42School/norminette.\\
+
+Pull requests, suggestions and issues are welcome!
+
+\newpage
+
+
+%******************************************************************************%
+%
+%                             Pedago explanations                              %
+%
+%******************************************************************************%
+    \chapter{Why?}
+
+    The Norm has been carefully crafted to fulfill many pedagogical needs. Here
+    are the most important reasons behind the choices below:
+    \begin{itemize}
+
+    \item Sequencing: coding implies splitting a big and complex task into a long series
+    of elementary instructions. All these instructions will be executed in sequence:
+    one after another. A beginner who starts creating software needs a simple and clear
+    architecture for their project, with a full understanding of all individual instructions
+    and the precise order of execution. Cryptic language syntaxes that perform multiple
+    instructions seemingly at the same time are confusing, and functions that try to address
+    multiple tasks mixed in the same portion of code are a source of errors.\\
+    The Norm asks you to create simple pieces of code, where the unique task of each piece
+    can be clearly understood and verified, and where the sequence of all the executed
+    instructions leaves no doubt. That's why we ask for 25 lines maximum in functions, and also why
+    \texttt{for}, \texttt{do .. while}, and ternaries are forbidden.
+
+    \item Look and Feel: while exchanging with your friends and workmates during the
+    normal peer-learning process, and also during peer-evaluations, you do not
+    want to spend time decrypting their code, but to talk directly about the
+    logic of the piece of code.\\
+    The Norm asks you to use a specific look and feel, providing instructions for the naming
+    of functions and variables, indentation, brace rules, and tabs and spaces in many places.
+    This will allow you to glance at other people's code, which will look familiar,
+    and get directly to the point instead of spending time reading the code before understanding it.
+    The Norm also works as a trademark. As part of the 42 community, you will be able to
+    recognize code written by another 42 student or alumnus once you are in the labor market.
+
+    \item Long-term vision: making the effort to write understandable code is the
+    best way to maintain it. Each time someone else, including you, has to fix a bug
+    or add a new feature, they won't have to waste their precious time figuring out
+    what the code does if you did things the right way in the first place. This avoids situations
+    where pieces of code stop being maintained just because maintaining them is time-consuming,
+    and that can make the difference when it comes to having a successful product in the market.
+    The sooner you learn to do so, the better.
+ + \item References: you may think that some, or all, the rules included on the Norm are + arbitrary, but we actually thought and read about what to do and how to do it. We highly + encourage you to Google why the functions should be short and just do one thing, why the + name of the variables should make sense, why lines shouldn't be longer than 80 columns wide, + why a function should not take many parameters, why comments should be useful, etc. + + \end{itemize} + + +\newpage %******************************************************************************% % % -% Norme % +% The Norm % % % %******************************************************************************% \chapter{The Norm} %******************************************************************************% -% Conventions de denomination % +% Naming conventions % %******************************************************************************% - \section{Denomination} + \section{Naming} \begin{itemize} @@ -65,30 +128,22 @@ \chapter{The Norm} \item A global's name must start by \texttt{g\_}. - \item Variables and functions names can only contain lowercases, digits and - '\_' (Unix Case). + \item Identifiers, like variables, functions names, user defined types, + can only contain lowercases, digits and '\_' (snake\_case). No capital letters are allowed. \item Files and directories names can only contain lowercases, digits and - '\_' (Unix Case). + '\_' (snake\_case). \item Characters that aren't part of the standard - ascii table are forbidden. - - \item Variables, functions, and any other identifier must use - snake case. No capital letters, and each word separated by an - underscore. + ASCII table are forbidden, except inside litteral strings and chars. - \item All identifiers (functions, macros, types, - variables, etc) must be in English. + \item \textit{(*)} All identifiers (functions, types, + variables, etc.) names should be explicit, or a mnemonic, + should be readable in English, with each word separated by an underscore. + This applies to macros, filenames and directories as well. - \item Objects (variables, functions, macros, types, - files or directories) must have the most - explicit or most mnemonic names as possible. - - \item Global variables are forbidden, expect where it's - mandatory to use them (Signal handling for one). Using - a global variable in a project where it's not explicitly - allowed is a norm error. + \item Using global variables that are not marked const or static is + forbidden and is considered a norm error, unless the project explicitly allows them. \item The file must compile. A file that doesn't compile isn't expected to pass the Norm. @@ -96,74 +151,81 @@ \chapter{The Norm} \newpage %******************************************************************************% -% Formatage % +% Formatting % %******************************************************************************% \section{Formatting} \begin{itemize} - \item You must indent your code with 4-space - tabulations. This is not the same as 4 average - spaces, we're talking about real tabulations here. + \item Each function must be at most 25 lines long, not + counting the function's own braces. - \item Each function must be maximum 25 lines, not - counting the function's own curly brackets. + \item Each line must be at most 80 columns wide, comments + included. Warning: a tabulation doesn't count + as a single column, but as the number of spaces it + represents. - \item Each line must be at most 80 columns wide, comments - included. 
Warning : a tabulation doesn't count - as a column, but as the number of spaces it - represents. + \item Functions must be separated by an empty line. Comments or preprocessor instructions + can be inserted between functions. At least an empty line must exists. - \item One instruction per line. + \item You must indent your code with 4-char-long tabulations. + This is not the same as 4 spaces, we're talking about real tabulations here (ASCII char number 9). + Check that your code editor is correctly configured in order to visually get a proper indentation + that will be validated by the \texttt{norminette}. - \item An empty line must be empty: no spaces or tabulations. + \item Blocks within braces must be indented. Braces are alone on their own line, + except in declaration of struct, enum, union. - \item A line can never end with spaces or tabulations. + \item An empty line must be empty: no spaces or tabulations. - \item You can never have two consecutive spaces. + \item A line can never end with spaces or tabulations. + + \item You can never have two consecutive empty lines. + You can never have two consecutive spaces. - \item You need to start a new line after each curly bracket - or end of control structure. + \item Declarations must be at the beginning of a function. - \item Unless it's the end of a line, each comma or semi-colon - must be followed by a space. + \item All variable names must be indented on the same + column in their scope. Note: types are already indented by the containing block. - \item Each operator or operand must be separated by one - - and only one - space. + \item The asterisks that go with pointers must be stuck to + variable names. - \item Each C keyword must be followed by a space, except for - keywords for types (such as int, char, float, etc.), - as well as sizeof. + \item One single variable declaration per line. - \item Each variable declaration must be indented on the same - column for its scope. + \item Declaration and an initialisation cannot be + on the same line, except for global variables (when allowed), + static variables, and constants. - \item The asterisks that go with pointers must be stuck to - variable names. + \item In a function, you must place an empty line between + variable declarations and the remaining of the function. + No other empty lines are allowed in a function. - \item One single variable declaration per line. + \item Only one instruction or control structure per line is allowed. Eg.: Assignment in + a control structure is forbidden, two or multiple assignments on the same line is forbidden, + a newline is needed at the end of a control structure, ... . - \item We cannot stick a declaration and an initialisation - on the same line, except for global variables (when allowed), - static variables, and constants. + \item An instruction or control structure can be split into multiple lines when needed. + The following lines created must be indented compared to the first line, + natural spaces will be used to cut the line, and if applies, operators will be + at the beginning of the new line and not at the end of the previous one. - \item Declarations must be at the beginning of a function, - and must be separated by an empty line. + \item Unless it's the end of a line, each comma or semi-colon + must be followed by a space. - \item In a function, you must place an empty line between - variable declarations and the remaining of the function. - No other empty lines are allowed in a function. 
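As a small sketch of the formatting rules above (the function and variable names are invented, and the indentation shown here stands for real tabulations), a split instruction keeps the operator at the start of the continuation line and declarations stay grouped at the top of the function:

\begin{42ccode}
/*
** Declarations first, one per line, names aligned on the same column;
** a single empty line separates them from the body; the continuation
** line starts with its operator and is indented further.
*/
int	add_margins(int base_value, int margin)
{
	int	*ptr;
	int	total;

	ptr = &base_value;
	total = *ptr + margin
		+ 42;
	if (total > 80)
		return (80);
	return (total);
}
\end{42ccode}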
+ \item Each operator or operand must be separated by one + - and only one - space. - \item Multiple assignments are strictly forbidden. + \item Each C keyword must be followed by a space, except for + keywords for types (such as int, char, float, etc.), + as well as sizeof. - \item You may add a new line after an instruction or - control structure, but you'll have to add an - indentation with brackets or affectation operator. - Operators must be at the beginning of a line. + \item Control structures (if, while..) must use braces, unless they contain a single + instruction on a single line. \end{itemize} - \newpage +\vspace{1cm} General example: \begin{42ccode} @@ -186,30 +248,32 @@ \chapter{The Norm} \newpage %******************************************************************************% -% Parametres de fonction % +% Function parameters % %******************************************************************************% \section{Functions} \begin{itemize} - \item A function can take 4 named parameters maximum. + \item A function can take 4 named parameters at most. \item A function that doesn't take arguments must be - explicitely prototyped with the word "void" as + explicitly prototyped with the word "void" as the argument. \item Parameters in functions' prototypes must be named. - \item Each function must be separated from the next by - an empty line. - \item You can't declare more than 5 variables per function. - \item Return of a function has to be between parenthesis. + \item Return of a function has to be between parenthesis, unless the + function returns nothing. \item Each function must have a single tabulation between its return type and its name. + \end{itemize} + +\vspace{1cm} + \begin{42ccode} int my_func(int arg1, char arg2, char *arg3) { @@ -222,29 +286,31 @@ \chapter{The Norm} } \end{42ccode} - \end{itemize} \newpage %******************************************************************************% -% Typedef, struct, enum et union % +% Typedef, struct, enum and union % %******************************************************************************% \section{Typedef, struct, enum and union} \begin{itemize} - \item Add a tabulation when declaring a struct, enum or union. + \item As other C keywords, add a space between ``struct'' and the name + when declaring a struct. Same applies to enum and union. - \item When declaring a variable of type struct, enum or union, - add a single space in the type. + \item When declaring a variable of type struct, apply the usual indentation for the name + of the variable. Same applies to enum and union. - \item When declaring a struct, union or enum with a typedef, - all indentation rules apply. You must align the typedef's name - with the struct/union/enum's name. + \item Inside the braces of the struct, enum, union, regular indentation rules + apply, like any other blocks. - \item You must indent all structures names on the same column for their scope. + \item As other C keywords, add a space after ``typedef'', + and apply regular indentation for the new defined name. - \item You cannot declare a structure in a .c file. + \item You must indent all structures' names on the same column for their scope. + + \item You cannot declare a structure in a .c file. 
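To tie the prefixes from the Naming section to the typedef rules above, here is a small sketch of how such declarations could look in a header file (the type and function names are invented for illustration):

\begin{42ccode}
/*
** The struct tag takes the s_ prefix and the typedef the t_ prefix;
** member names are aligned on the same column for their scope.
*/
typedef struct s_point
{
	int	x;
	int	y;
}	t_point;

typedef union u_value
{
	int		number;
	char	bytes[4];
}	t_value;

int	squared_norm(t_point point);
\end{42ccode}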
\end{itemize} \newpage @@ -253,28 +319,30 @@ \chapter{The Norm} %******************************************************************************% % Headers % %******************************************************************************% - \section{Headers} + \section{Headers - a.k.a include files} \begin{itemize} - \item The things allowed in header files are : + \item \textit{(*)} The allowed elements of a header file are: header inclusions (system or not), declarations, defines, prototypes and macros. \item All includes must be at the beginning of the file. - \item You cannot include a C file. + \item You cannot include a C file in a header file or another C file. - \item We'll protect headers from double inclusions. If the file is + \item Header files must be protected from double inclusions. If the file is \texttt{ft\_foo.h}, its bystander macro is \texttt{FT\_FOO\_H}. - \item Unused header inclusions (.h) are forbidden. + \item \textit{(*)} Inclusion of unused headers is forbidden. - \item All header inclusions must be justified in a .c file - as well as in a .h file. + \item Header inclusion can be justified in the .c file and in the .h file itself + using comments. \end{itemize} +\vspace{1cm} + \begin{42ccode} #ifndef FT_HEADER_H # define FT_HEADER_H @@ -282,43 +350,73 @@ \chapter{The Norm} # include # define FOO "bar" -int g_variable; -struct s_struct; +int g_variable; +struct s_struct; #endif \end{42ccode} \newpage + +%******************************************************************************% +% The 42 header % +%******************************************************************************% + + \section{The 42 header - a.k.a start a file with style} + + \begin{itemize} + + \item Every .c and .h file must immediately begin with the standard 42 header: + a multi-line comment with a special format including useful informations. The + standard header is naturally available on computers in clusters for various + text editors (emacs: using \texttt{C-c C-h}, vim using \texttt{:Stdheader} or + \texttt{F1}, etc...). + + \item \textit{(*)} The 42 header must contain several informations up-to-date, including the + creator with login and student email (@student.campus), the date of creation, + the login and date of the last update. Each time the file is saved on disk, + the information should be automatically updated. + + \end{itemize} + \info{ + The default standard header may not automatically be configured with your personnal + information. You may need to change it to follow the previous rule. + } + + \newpage + + %******************************************************************************% -% Macros et pre-processeur % +% Macros and Pre-processors % %******************************************************************************% \section{Macros and Pre-processors} \begin{itemize} - \item Preprocessor constants (or \#define) you create must be used - only for associate literal and constant values. - \item All \#define created to bypass the norm and/or obfuscate - code are forbidden. This point must be checked by a human. - \item You can use macros available in standard libraries, only + \item \textit{(*)} Preprocessor constants (or \#define) you create must be used + only for literal and constant values. + \item \textit{(*)} All \#define created to bypass the norm and/or obfuscate + code are forbidden. + \item \textit{(*)} You can use macros available in standard libraries, only if those ones are allowed in the scope of the given project. \item Multiline macros are forbidden. 
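Since the rules above restrict \#define to literal and constant values and forbid multiline macros, a short illustration may help (all names are invented; the \#ifdef block uses the same single-space indentation already shown in the header example):

\begin{42ccode}
/*
** Allowed: preprocessor constants that only carry literal values,
** with uppercase names, plus indented directives inside an #ifdef.
*/
#define BUFFER_SIZE 4096
#define PROGRAM_NAME "norminette"

#ifdef DEBUG
# define LOG_LEVEL 2
#else
# define LOG_LEVEL 0
#endif

/*
** Forbidden: a multiline macro that hides actual code.
*/
#define INIT_COUNTERS(a, b) a = 0; \
	b = 0;
\end{42ccode}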
- \item Only macros names are uppercase. - \item You must indent characters following \#if , \#ifdef - or \#ifndef. + \item Macro names must be all uppercase. + \item You must indent preprocessor directives inside \#if, \#ifdef + or \#ifndef blocks. + \item Preprocessor instructions are forbidden outside of global scope. \end{itemize} \newpage %******************************************************************************% -% Choses interdites ! % +% Forbidden stuff! % %******************************************************************************% - \section{Forbidden stuff !} + \section{Forbidden stuff!} \begin{itemize} - \item You're not allowed to use : + \item You're not allowed to use: \begin{itemize} @@ -330,16 +428,21 @@ \chapter{The Norm} \end{itemize} - \item ternary operators such as `?'. + \item Ternary operators such as `?'. \item VLAs - Variable Length Arrays. + \item Implicit type in variable declarations + \end{itemize} + +\vspace{1cm} + \begin{42ccode} int main(int argc, char **argv) { int i; - char string[argc]; // This is a VLA + char str[argc]; // This is a VLA i = argc > 5 ? 0 : 1 // Ternary } @@ -347,32 +450,42 @@ \chapter{The Norm} \newpage %******************************************************************************% -% Commentaires % +% Comments % %******************************************************************************% \section{Comments} \begin{itemize} - \item Comments cannot be inside functions' bodies. - Comments must be at the end of a line, or on their own line + \item Comments cannot be inside function bodies. + Comments must be at the end of a line, or on their own line - \item You comments must be in English. And they must be - useful. + \item \textit{(*)} Your comments should be in English, and useful. - \item A comment cannot justify a "bastard" function. + \item \textit{(*)} A comment cannot justify the creation of a carryall or bad function. \end{itemize} + + \warn{ + A carryall or bad function usually comes with names that are + not explicit such as f1, f2... for the function and a, b, c,.. + for the variables names. + A function whose only goal is to avoid the norm, without a unique + logical purpose, is also considered as a bad function. + Please remind that it is desirable to have clear and readable functions that achieve a + clear and simple task each. Avoid any code obfuscation techniques, + such as the one-liner, ... . + } \newpage %******************************************************************************% -% Les fichiers % +% Files % %******************************************************************************% \section{Files} \begin{itemize} - \item You cannot include a .c file. + \item You cannot include a .c file in a .c file. \item You cannot have more than 5 function-definitions in a .c file. @@ -385,31 +498,30 @@ \chapter{The Norm} %******************************************************************************% \section{Makefile} - Makefile aren't checked by the Norm, and must be checked during evaluation by - the student. + Makefiles aren't checked by the \texttt{norminette}, and must be checked during evaluation by + the student when asked by the evaluation guidelines. Unless specific instructions, the following rules + apply to the Makefiles: \begin{itemize} - \item The \$(NAME), clean, fclean, re and all - rules are mandatory. + \item The \textit{\$(NAME)}, \textit{clean}, \textit{fclean}, \textit{re} and \textit{all} + rules are mandatory. 
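The snippet in the "Forbidden stuff!" section only shows what is rejected; one possible Norm-compliant rewrite of it, with invented names and sizes, looks like this (the explanatory comment sits above the function because comments may not appear inside function bodies):

\begin{42ccode}
/*
** while replaces for, if/else replaces the ternary, and the array
** gets a constant size instead of being a VLA.
*/
int	main(int argc, char **argv)
{
	int		i;
	char	str[42];

	(void)argv;
	if (argc > 5)
		i = 0;
	else
		i = 1;
	while (i < argc)
		i++;
	str[0] = '\0';
	return (0);
}
\end{42ccode}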
The \textit{all} rule must be the default one and executed when typing just \texttt{make}. - \item If the makefile relinks, the project will be considered + \item If the makefile relinks when not necessary, the project will be considered non-functional. - \item In the case of a multibinary project, on top of the - rules we've seen, you must have a rule that compiles - both binaries as well as a specific rule for each - binary compiled. + \item In the case of a multibinary project, in addition to + the above rules, you must have a rule for each binary (eg: \$(NAME\_1), \$(NAME\_2), ...). + The ``all'' rule will compile all the binaries, using each binary rule. - \item In the case of a project that calls a functions library - (e.g.: \texttt{libft}), your makefile must compile + \item In the case of a project that calls a function from a non-system library + (e.g.: \texttt{libft}) that exists along your source code, your makefile must compile this library automatically. - \item All source files you need to compile your project must - be explicitly named in your Makefile. + \item All source files needed to compile your project must + be explicitly named in your Makefile. Eg: no ``*.c'', no ``*.o'' , etc ... \end{itemize} - \end{document} %******************************************************************************% diff --git a/pdf/es.norm.pdf b/pdf/es.norm.pdf new file mode 100644 index 00000000..bef087cd Binary files /dev/null and b/pdf/es.norm.pdf differ diff --git a/pdf/es.norm.pdf.version b/pdf/es.norm.pdf.version new file mode 100644 index 00000000..bf0d87ab --- /dev/null +++ b/pdf/es.norm.pdf.version @@ -0,0 +1 @@ +4 \ No newline at end of file diff --git a/pdf/es.norm.tex b/pdf/es.norm.tex new file mode 100644 index 00000000..8558097f --- /dev/null +++ b/pdf/es.norm.tex @@ -0,0 +1,481 @@ +\documentclass{42-es} + +%******************************************************************************% +% % +% Prologue % +% % +%******************************************************************************% + +\begin{document} +\title{La Norma} +\subtitle{Version 4} + +\summary +{ + Este documento describe la norma aplicable en 42. Una norma de + programación + define un conjunto de reglas a seguir al escribir código. La Norma se + aplica + a todos los proyectos de C dentro del Common Core por defecto, y a + cualquier + proyecto donde se especifique. + This document describes the applicable standard (Norm) at 42. A + programming standard defines a set of rules to follow when writing code. + The Norm applies to all C projects within the Common Core by default, and + to any project where it's specified. +} + +\maketitle + +\tableofcontents + +%******************************************************************************% +% % +% Foreword % +% % +%******************************************************************************% +\chapter{Prefacio} + +La \texttt{norminette} está escrita en python y es de código abierto. \\ +Su repositorio está disponible en \url{https://github.com/42School/norminette}.\\ +¡Pull request, sugerencias e issues serán bien recibidos! + +\newpage + +%******************************************************************************% +% +% Pedago explanations % +% +%******************************************************************************% +\chapter{¿Por qué?} + +La Norma ha sido cuidadosamente elaborada para cumplir con muchas +necesidades pedagógicas. 
Aquí están las razones más importantes para todas las +elecciones a continuación: +\begin{itemize} + + \item Secuenciación: programar implica dividir una tarea grande y + compleja en una larga serie de instrucciones elementales. Todas + estas instrucciones se ejecutarán en secuencia: una tras otra. Un + principiante que comienza a crear software necesita una + arquitectura + simple y clara para su proyecto, con una comprensión completa de + todas las instrucciones individuales y el orden preciso de + ejecución. Los lenguajes de programación crípticos que hacen + múltiples instrucciones aparentemente al mismo tiempo son + confusos, las funciones que intentan abordar múltiples tareas + mezcladas en la misma porción de código son fuente de errores.\\ + La Norma te pide que crees piezas de código simples, donde la + tarea única de cada pieza pueda ser claramente entendida y + verificada, y donde la secuencia de todas las instrucciones + ejecutadas no deje lugar a dudas. Por eso pedimos un máximo de 25 + líneas en las funciones, también por qué se prohíben los + \texttt{for}, \texttt{do .. while}, o ternarios. + \item Aspecto: al intercambiar con tus amigos y compañeros de trabajo + durante el proceso normal de aprendizaje entre pares, y también + durante las evaluaciones entre pares, no quieres perder tiempo + descifrando su código, sino hablar directamente sobre la lógica de + la pieza de código.\\ + La Norma te pide que uses un aspecto específico, proporcionando + instrucciones para el nombre de las funciones y variables, + indentación, reglas de llaves, tabulaciones y espacios en muchos + lugares... . Esto te permitirá echar un vistazo a otros códigos que + te resultarán familiares y llegar directamente al punto en lugar de + perder tiempo leyendo el código antes de entenderlo. La Norma + también + funciona como una marca registrada. Como parte de la comunidad 42, + podrás reconocer el código escrito por otro estudiante o exalumno + de + 42 cuando estés en el mercado laboral. + + \item Visión a largo plazo: hacer el esfuerzo de escribir código + comprensible es la mejor manera de mantenerlo. Cada vez que alguien + más, incluyéndote a ti, tenga que corregir un error o agregar una + nueva característica, no tendrá que perder su valioso tiempo + tratando de entender lo que hace si previamente hiciste las cosas + de + la manera correcta. Esto evitará situaciones en las que los + fragmentos de código dejen de ser mantenidos solo porque lleva + tiempo, y eso puede marcar la diferencia cuando hablamos de tener + un + producto exitoso en el mercado. Cuanto antes aprendas a hacerlo, + mejor. + + \item Referencias: puedes pensar que algunas, o todas, las reglas + incluidas + en la Norma son arbitrarias, pero en realidad pensamos y leímos + sobre + qué hacer y cómo hacerlo. Te animamos encarecidamente a buscar por + qué + las funciones deben ser cortas y hacer una sola cosa, por qué el + nombre de las variables debe tener sentido, por qué las líneas no + deben ser más largas de 80 columnas, por qué una función no debe + tener muchos parámetros, por qué los comentarios deben ser útiles, + etc, etc, etc... 
+ +\end{itemize} + +\newpage + +%******************************************************************************% +% % +% The Norm % +% % +%******************************************************************************% +\chapter{La Norma} + +%******************************************************************************% +% Naming conventions % +%******************************************************************************% +\section{Denominación} + +\begin{itemize} + + \item El nombre de una estructura debe comenzar con \texttt{s\_}. + \item El nombre de un typedef debe comenzar con \texttt{t\_}. + \item El nombre de un union debe comenzar con \texttt{u\_}. + \item El nombre de un enum debe comenzar con \texttt{e\_}. + \item El nombre de una variable global debe comenzar con \texttt{g\_}. + \item Los nombres de variables y funciones solo pueden contener + minúsculas, dígitos y '\_' (snake\_case). + \item Los nombres de archivos y directorios solo pueden contener + minúsculas, dígitos y '\_' (snake\_case). + \item Los caracteres que no forman parte de la tabla ASCII estándar están + prohibidos. + \item Las variables, funciones y cualquier otro identificador deben usar + snake case. Sin letras mayúsculas, y cada palabra separada por un + guión bajo. + \item Todos los identificadores (funciones, macros, tipos, variables, + etc.) + deben estar en inglés. + \item Los objetos (variables, funciones, macros, tipos, archivos o + directorios) deben tener nombres lo más explícitos o mnemótecnicos + posibles. + \item El uso de variables globales que no están marcadas como const y + static está prohibido y se considera un error de norma, a menos que + el proyecto las permita explícitamente. + \item El archivo debe compilar. Un archivo que no compila no se espera + que + pase la Norma. +\end{itemize} +\newpage + +%******************************************************************************% +% Formatting % +%******************************************************************************% +\section{Formato} + +\begin{itemize} + + \item Debes indentar tu código con tabulaciones de 4 espacios. + Esto no es lo mismo que 4 espacios normales, estamos hablando de + verdaderas tabulaciones. + \item Cada función debe tener un máximo de 25 líneas, sin contar las + llaves + de la función. + \item Cada línea debe tener como máximo 80 columnas de ancho, incluidos + los + comentarios. Advertencia: una tabulación no cuenta como una + columna, + sino como + el número de espacios que representa. + \item Cada función debe estar separada por una nueva línea. Cualquier + comentario + o instrucción de preprocesador puede estar justo encima de la + función. La + nueva línea está después de la función anterior. + \item Una instrucción por línea. + \item Una línea vacía debe estar vacía: sin espacios o tabulaciones. + \item Una línea no puede terminar con espacios o tabulaciones. + \item No puedes tener dos espacios consecutivos. + \item Debes comenzar una nueva línea después de cada llave de apertura o + cierre o después de una estructura de control. + \item Salvo que sea el final de una linea, cada coma o punto y coma + debe ser seguido por un espacio. + \item Cada operador u operando debe estar separado por un (y solo un) + espacio. + \item Cada palabra clave de C debe ir seguida de un espacio, excepto las + palabras clave para tipos (como int, char, float, etc.), así como + sizeof. + \item Cada declaración de variable debe estar indentada en la misma + columna + dentro de su scope. 
+ \item Los asteriscos que acompañan a los punteros deben estar pegados a + los + nombres de las variables. + \item Una sola declaración de variable por línea. + \item La declaración y la inicialización no pueden estar en la misma + línea, + excepto para las variables globales (cuando se permiten), variables + estáticas y + constantes. + \item Las declaraciones deben estar al principio de una función. + \item En una función, debes colocar una línea vacía entre las + declaraciones + de variables y el resto de la función. No se permiten otras líneas + vacías en una función. + \item Las asignaciones múltiples están completamente prohibidas. + \item Puedes agregar una nueva línea después de una instrucción o + estructura + de control, pero tendrás que agregar una indentación con llaves o + operador de asignación. Los operadores deben estar al principio de + una línea. + \item Las estructuras de control (if, while, etc...) deben tener llaves, + salvo que contengan una sola línea. + \item Las llaves que siguen a las funciones, declaradores o estructuras + de + control deben estar precedidas y seguidas por una nueva línea. + +\end{itemize} + +Ejemplo general de formato: +\begin{42ccode} + int g_global; + typedef struct s_struct + { + char *my_string; + int i; + } t_struct; + struct s_other_struct; + + int main(void) + { + int i; + char c; + + return (i); + } +\end{42ccode} +\newpage + +%******************************************************************************% +% Function parameters % +%******************************************************************************% +\section{Funciones} + +\begin{itemize} + + \item Una función no puede recibir más de 4 parámetros. + \item Una función que no recibe argumentos debe ser prototipada con la + palabra ``void'' como argumento. + \item Los parámetros en los prototipos de las funciones deben tener + nombre. + \item Cada función debe estar separada de la siguiente por una línea + vacía. + \item No puedes declarar más de 5 variables por función. + \item El retorno de una función debe estar entre paréntesis. + \item Cada función debe tener una sola tabulación entre su tipo de + retorno + y + su nombre. + + \begin{42ccode} + int my_func(int arg1, char arg2, char *arg3) + { + return (my_val); + } + + int func2(void) + { + return ; + } + \end{42ccode} +\end{itemize} +\newpage + +%******************************************************************************% +% Typedef, struct, enum and union % +%******************************************************************************% +\section{Typedef, struct, enum y union} + +\begin{itemize} + \item Agrega una tabulación al declarar un struct, enum o union. + \item Al declarar una variable de tipo struct, enum o union, agrega un + solo + espacio en el tipo. + \item Al declarar un struct, union o enum con un typedef, se aplican + todas + las reglas de indentación. + \item El nombre del typedef debe ir precedido por una tabulación. + \item Debes indentar todos los nombres de las estructuras en la misma + columna dentro de su scope. + \item No puedes declarar una estructura en un archivo .c. +\end{itemize} +\newpage + +%******************************************************************************% +% Headers % +%******************************************************************************% +\section{Headers - a.k.a archivos include} + +\begin{itemize} + \item Las cosas permitidas en los archivos de cabecera son: + inclusiones de cabecera (de sistema o no), declaraciones, defines, + prototipos y macros. 
+ \item Todos los includes deben estar al principio del archivo. + \item No puedes incluir un archivo C. + \item Los archivos de cabecera deben estar protegidos de inclusiones + dobles. Si el archivo es \texttt{ft\_foo.h}, su macro de protección + es + \texttt{FT\_FOO\_H}. + \item Las inclusiones de cabecera no utilizadas (.h) están prohibidas. + \item Todas las inclusiones de cabecera deben estar justificadas en un + archivo .c, así como en un archivo .h. +\end{itemize} + +\begin{42ccode} + #ifndef FT_HEADER_H + # define FT_HEADER_H + # include + # include + # define FOO "bar" + + int g_variable; + struct s_struct; + + #endif +\end{42ccode} +\newpage + +%******************************************************************************% +% The 42 header % +%******************************************************************************% + +\section{La cabecera de 42 - a.k.a cómo empezar un archivo con estilo} + +\begin{itemize} + \item Todos los archivos .c y .h deben comenzar inmediatamente con la + cabecera 42 estándar: un comentario de varias líneas con un formato + especial + que incluye información útil. La cabecera estándar está + naturalmente + disponible + en las computadoras en clusters para varios editores de texto + (emacs: + usando + \texttt{C-c C-h}, vim usando \texttt{:Stdheader} o \texttt{F1}, + etc...). + \item La cabecera 42 debe contener varias informaciones actualizadas, + incluyendo + el creador con login y correo electrónico, la fecha de creación, el + login y + la fecha de la última actualización. Cada vez que el archivo se + guarda en + disco, la información debe actualizarse automáticamente. +\end{itemize} +\newpage + +%******************************************************************************% +% Macros and Pre-processors % +%******************************************************************************% +\section{Macros y Preprocesadores} + +\begin{itemize} + + \item Las constantes del preprocesador (o \#define) que crees, deben + usarse solo para valores literales y constantes. + \item Todos los \#define creados para evitar la norma y/o para ofuscar + el código están prohibidos. Esta parte debe ser verificada por un + humano. + \item Puedes usar macros disponibles en bibliotecas estándar, solo si + esas están permitidas en el alcance del proyecto dado. + \item Las macros de varias líneas están prohibidas. + \item Los nombres de las macros deben estar en mayúsculas. + \item Debes indentar los caracteres que siguen a \#if, \#ifdef o + \#ifndef. + \item Las instrucciones del preprocesador están prohibidas fuera del + alcance global. +\end{itemize} +\newpage + +%******************************************************************************% +% Forbidden stuff! % +%******************************************************************************% +\section{¡Cosas Prohibidas!} + +\begin{itemize} + \item No puedes utilizar: + \begin{itemize} + + \item for + \item do...while + \item switch + \item case + \item goto + + \end{itemize} + \item Operadores ternarios como `?'. + \item VLAs - o arrays de longitud variable. + \item Tipos implícitos en declaraciones de variables. + +\end{itemize} +\begin{42ccode} + int main(int argc, char **argv) + { + int i; + char string[argc]; // This is a VLA + + i = argc > 5 ? 
0 : 1 // Ternary + } +\end{42ccode} +\newpage + +%******************************************************************************% +% Comments % +%******************************************************************************% +\section{Comentarios} + +\begin{itemize} + \item Los comentarios no pueden estar dentro de los cuerpos de las + funciones. Los comentarios deben estar al final de una línea, o en + su propia + línea. + \item Tus comentarios deben estar en inglés. Y deben ser útiles. + \item Un comentario no puede justificar la creación de un carryall o + una mala función. +\end{itemize} + +\warn{ + Un carryall o mala función generalmente viene con nombres que no son + explícitos como f1, f2... para la función y a, b, i,.. para las + declaraciones. + Una función cuyo único objetivo es evitar la norma, sin un propósito + lógico único, también se considera como una mala función. + Por favor, recuerda que es preferible tener funciones claras y legibles + que logren una tarea clara y simple cada una. Evita cualquier técnica + de obfuscación de código, como la de una sola línea. +} +\newpage + +%******************************************************************************% +% Files % +%******************************************************************************% +\section{Archivos} + +\begin{itemize} + \item No puedes incluir un archivo .c + \item No puedes tener más de 5 definiciones de funciones en un archivo + .c. +\end{itemize} +\newpage + +%******************************************************************************% +% Makefile % +%******************************************************************************% +\section{Makefile} +Los Makefiles no son verificados por la Norma, y deben ser verificados durante +la evaluación por el estudiante. + +\begin{itemize} + + \item El \$(NAME), clean, fclean, re y all son reglas obligatorias. + \item Si el makefile hace relinks, el proyecto se considerará no funcional. + \item En el caso de un proyecto multibinario, además de las reglas + anteriores, debes tener una regla que compile ambos binarios, así + como una regla específica para cada binario compilado. + \item En el caso de un proyecto que llama a una función de una librería no del sistema (por ejemplo: libft), tu makefile debe compilar esta biblioteca automáticamente. + \item Todos los archivos fuente que necesitas para compilar tu proyecto deben estar nombrados explícitamente en tu Makefile. +\end{itemize} + +\end{document} +%******************************************************************************% diff --git a/pdf/fr.norm.pdf b/pdf/fr.norm.pdf new file mode 100644 index 00000000..c7e38bec Binary files /dev/null and b/pdf/fr.norm.pdf differ diff --git a/pdf/fr.norm.pdf.version b/pdf/fr.norm.pdf.version new file mode 100644 index 00000000..bf0d87ab --- /dev/null +++ b/pdf/fr.norm.pdf.version @@ -0,0 +1 @@ +4 \ No newline at end of file diff --git a/pdf/fr.norm.tex b/pdf/fr.norm.tex new file mode 100644 index 00000000..0e8d3b74 --- /dev/null +++ b/pdf/fr.norm.tex @@ -0,0 +1,463 @@ +\documentclass{42-fr} +\newcommand\qdsh{\texttt{42sh}} + + + +%******************************************************************************% +% % +% Prologue % +% % +%******************************************************************************% + +\begin{document} +\title{La Norme} +\subtitle{Version 4} + +\summary +{ + Ce document décrit La Norme C en vigueur à 42. Une norme de programmation + définit un ensemble de règles régissant l’écriture d’un code. 
+ La Norme s'applique par défaut à tous les projets C du + Cercle Intérieur, et à tout projet où elle est spécifiée. + +} + +\maketitle + +\tableofcontents + + + +%******************************************************************************% +% % +% Avant-propos % +% % +%******************************************************************************% +\chapter{Avant-propos} + + La Norminette est en Python et est open source.\\ + Vous pouvez en consulter les sources ici : https://github.com/42School/norminette.\\ + Les Pull Requests, suggestions et Issues sont les bienvenues ! +%******************************************************************************% +% +% Explications sur la Pedago % +% +%******************************************************************************% + + \chapter{Pourquoi ?} + + La norme a été soigneusement conçue pour répondre à de nombreux besoins pédagogiques. + Voici les raisons les plus importantes qui justifient tous les choix ci-dessous : + \begin{itemize} + + \item Sequencing : le codage implique la division d'une tâche importante et complexe en une + longue série d'instructions élémentaires. + Toutes ces instructions seront exécutées en séquence: l'une après l'autre. + Un débutant qui commence à créer un logiciel a besoin d'une architecture simple et claire pour son projet, + avec une compréhension totale de toutes les instructions individuelles et l'ordre précis d'exécution. + Les syntaxes de langage cryptiques qui exécutent plusieurs instructions apparemment en même temps sont déroutantes, les fonctions qui tentent d'aborder plusieurs tâches mélangées dans la même portion de code sont sources d'erreurs. + La norme vous demande de créer des morceaux de code simples, où la tâche unique de chaque morceau peut être clairement comprise et vérifiée, et où la séquence de toutes les instructions exécutées ne laisse aucun doute. + C'est pourquoi nous demandons un maximum de 25 lignes pour les fonctions, et aussi pourquoi + \texttt{for}, \texttt{do .. while}, ou les ternaires sont interdits. + + \item L'aspect et la sensation: lorsque vous échangez avec vos amis et collègues de travail au cours du processus normal d'apprentissage par les pairs, et également au cours des évaluations par les pairs, vous n'avez pas l'impression d'être en train d'apprendre. processus normal d'apprentissage par les pairs, ainsi que pendant les évaluations par les pairs, vous ne voulez pas passer du temps à décrypter leur code, mais plutôt à parler directement de ce qu'ils ont fait. pas passer du temps à décrypter leur code, mais parler directement de la logique du morceau de code. \\ + La norme vous demande d'utiliser une présentation spécifique, en fournissant des instructions pour la dénomination des fonctions et des variables, l'indentation, les règles d'accolade, les tabulations et les espaces à de nombreux endroits... + Cela vous permettra de jeter un coup d'œil sur les codes d'autres personnes qui vous sembleront familiers, et d'aller directement à l'essentiel au lieu de passer du temps à lire le code avant de le comprendre. + La norme est également une marque de fabrique. En tant que membre de la communauté 42, vous serez en mesure de reconnaître le code écrit par un autre étudiant ou ancien étudiant de 42 lorsque vous serez sur le marché du travail. + + \item Vision à long terme : faire l'effort d'écrire un code compréhensible est le meilleur moyen de le maintenir. 
+ Chaque fois que quelqu'un d'autre, y compris vous, doit corriger un bogue ou ajouter une nouvelle fonctionnalité, il n'aura pas à perdre son temps précieux à essayer de comprendre ce que fait le code si, auparavant, vous aviez fait les choses de la bonne manière. Cela évitera les situations où des morceaux de code cessent d'être maintenus simplement parce que cela prend du temps, et cela peut faire la différence lorsqu'il s'agit d'avoir un produit réussi sur le marché. + Le plus tôt vous apprendrez à le faire, le mieux ce sera. + + \item Références : vous pouvez penser que certaines, voire toutes les règles incluses dans la norme sont arbitraires, + mais nous avons réellement réfléchi et lu ce qu'il fallait faire et comment le faire. + Nous vous encourageons vivement à chercher sur Google pourquoi les fonctions doivent être courtes et ne faire qu'une chose, + pourquoi le nom des variables doit avoir un sens, pourquoi les lignes ne doivent pas dépasser 80 colonnes, pourquoi une fonction ne doit pas prendre beaucoup de paramètres, + pourquoi les commentaires doivent être utiles, etc, etc, etc... + + \end{itemize} + + +\newpage + +%******************************************************************************% +% % +% Norme % +% % +%******************************************************************************% +\chapter{La Norme} + + +%******************************************************************************% +% Conventions de denomination % +%******************************************************************************% + \section{Conventions de dénomination} + + \begin{itemize} + + \item Un nom de structure doit commencer par + \texttt{s\_}. + + \item Un nom de typedef doit commencer par + \texttt{t\_}. + + \item Un nom d’union doit commencer par \texttt{u\_}. + + \item Un nom d’enum doit commencer par \texttt{e\_}. + + \item Un nom de globale doit commencer par \texttt{g\_}. + + \item Les noms de variables, de fonctions doivent être composés exclusivement de + minuscules, de chiffres et de '\_' (Unix Case). + + \item Les noms de fichiers et de répertoires doivent être composés exclusivement de + minuscules, de chiffres et de '\_' (Unix Case). + + \item Les caractères ne faisant pas partie de la table ASCII standard + ne sont pas autorisés. + + \item Les variables, fonctions, et tout autre identifiant doivent être en Snake Case. + ( En minuscules et en les séparant par des underscore ) + + \item Tous les identifiants (fonctions, macros, types, variables, etc) + doivent être en anglais. + + \item Les objets (variables, fonctions, macros, types, fichiers ou répertoires) + doivent avoir les noms les plus explicites ou mnémoniques possibles. + + \item Les variables globales sont interdites, sauf quand vous êtes obligé d'en utiliser ( signal handling ). L’utilisation d'une variable globale + dans un projet où ce n’est pas explicitement autorisé est une erreur de Norme. + + \item Le fichier doit être compilable. Un fichier qui ne compile pas n'est + pas censé passer La Norme. + \end{itemize} +\newpage + +%******************************************************************************% +% Formatage % +%******************************************************************************% + \section{Formatage} + + \begin{itemize} + + \item Vous devez indenter votre code avec des tabulations de la taille de 4 espaces. + Ce n’est pas équivalent à 4 espaces, ce sont bien des tabulations. + + \item Chaque fonction doit faire au maximum 25 lignes + sans compter les accolades du bloc de la fonction. 
+ + \item Chaque ligne ne peut pas faire plus de 80 colonnes, commentaires compris. + Une tabulation ne compte pas pour une colonne, mais bien pour les \texttt{n} espaces qu’elle représente. + + \item Chaque fonction doit être séparée par une ligne vide de la suivante. + Tout commentaire ou préprocesseur peut se trouver juste au-dessus + de la fonction. Le saut de ligne se trouve après la fonction précédente. + + \item Une seule instruction par ligne + + \item Une ligne vide doit être vide. Elle ne doit pas contenir d’espace ou de tabulation. + + \item Une ligne ne doit jamais se terminer par des espaces ou des tabulations. + + \item Vous ne pouvez pas avoir 2 espaces à la suite. + + \item Quand vous rencontrez une accolade, ouvrante ou fermante, + ou une fin de structure de contrôle, vous devez retourner à la ligne. + + \item Chaque virgule ou point-virgule doit être suivi d’un espace, sauf en fin de ligne. + + \item Chaque opérateur et opérande doivent être séparés par un seul espace. + + \item Chaque mot-clé en C doit être suivi d’un espace, sauf pour ceux + de type (comme \texttt{int}, \texttt{char}, \texttt{float}, etc.) ainsi que \texttt{sizeof}. + + \item Chaque déclaration de variable doit être indentée sur la même colonne. + + \item Les étoiles des pointeurs doivent être collées au nom de la variable. + + \item Une seule déclaration de variable par ligne + + \item On ne peut faire une déclaration et une initialisation sur une même ligne, + à l’exception des variables globales (quand elles sont permises) et des variables statiques. + + \item Les déclarations doivent être en début de fonction et doivent être séparées + de l’implémentation par une ligne vide. + + \item Aucune ligne vide ne doit être présente au milieu des déclarations ou de l’implémentation. + + \item La multiple assignation est interdite. + + \item Vous pouvez retourner à la ligne lors d’une même instruction ou structure de + contrôle, mais vous devez rajouter une indentation par accolade ou opérateur + d’affectation. Les opérateurs doivent être en début de ligne. + \end{itemize} + + \newpage + + Exemple: + \begin{42ccode} +int g_global; +typedef struct s_struct +{ + char *my_string; + int i; +} t_struct; +struct s_other_struct; + +int main(void) +{ + int i; + char c; + + return (i); +} + \end{42ccode} + \newpage + +%******************************************************************************% +% Parametres de fonction % +%******************************************************************************% + \section{Fonctions} + + \begin{itemize} + + \item Une fonction prend au maximum 4 paramètres nommés. + + \item Une fonction qui ne prend pas d’argument doit explicitement être prototypée + avec le mot \texttt{void} comme argument. + + \item Les paramètres des prototypes de fonctions doivent être nommés. + + \item Chaque définition de fonction doit être séparée par une ligne vide de la suivante. + + \item Vous ne pouvez déclarer que 5 variables par bloc au maximum. + + \item Le retour d’une fonction doit se faire entre parenthèses. + + \item Chaque fonction doit avoir une seule tabulation entre son type + de retour et son nom. 
+ + \begin{42ccode} +int my_func(int arg1, char arg2, char *arg3) +{ + return (my_val); +} + +int func2(void) +{ + return ; +} + \end{42ccode} + + \end{itemize} + \newpage + + +%******************************************************************************% +% Typedef, struct, enum et union % +%******************************************************************************% + \section{Typedef, struct, enum et union} + + \begin{itemize} + + \item Vous devez mettre une tabulation lorsque vous déclarez une \texttt{struct}, \texttt{enum} ou \texttt{union}. + + \item Lors de la déclaration d’une variable de type \texttt{struct}, \texttt{enum} ou \texttt{union}, + vous nemettrez qu’un espace dans le type. + + \item Lorsque vous déclarez une \texttt{struct}, \texttt{union} ou \texttt{enum} avec un \texttt{typedef}, + toutes les règles s’appliquent et vous devez aligner le nom du \texttt{typedef} + avec le nom de la \texttt{struct}, \texttt{union} ou \texttt{enum}. + + \item Vous devez indenter tous les noms de structures sur la même colonne. + + \item Vous ne pouvez pas déclarer une structure dans un fichier .c. + + \end{itemize} + \newpage + + +%******************************************************************************% +% Headers % +%******************************************************************************% + \section{Headers} + + \begin{itemize} + + \item Seuls les inclusions de headers (système ou non), + les déclarations, les \texttt{defines}, les prototypes et les macros + sont autorisés dans les fichiers headers. + + \item Tous les \texttt{includes} doivent se faire au début du fichier. + + \item Vous ne pouvez pas inclure de fichier C. + + \item On protègera les headers contre la double inclusion. Si le fichier est + \texttt{ft\_foo.h}, la macro témoin est \texttt{FT\_FOO\_H}. + + \item Une inclusion de header (.h) dont on ne se sert pas est interdite. + + \item Toute inclusion de header doit être justifiée autant dans un \texttt{.c} + que dans un \texttt{.h}. + + \end{itemize} + + \begin{42ccode} +#ifndef FT_HEADER_H +# define FT_HEADER_H +# include +# include +# define FOO "bar" + +int g_variable; +struct s_struct; + +#endif + \end{42ccode} + \newpage + +%******************************************************************************% +% The 42 header % +%******************************************************************************% + + \section{L'en-tête 42 - c'est-à-dire commencer un fichier avec style} + + \begin{itemize} + + \item Tout fichier .c et .h doit immédiatement commencer par l'en-tête standard 42: + un commentaire de plusieurs lignes avec un format spécial comprenant des informations utiles. + L'en-tête standard est naturellement disponible sur les ordinateurs dans les clusters des différents éditeurs de texte + (emacs : en utilisant \texttt{C-c C-h}, vim en utilisant \texttt{:Stdheader} ou \texttt{F1}, etc...) + + \item L'en-tête 42 doit contenir plusieurs informations à jour, notamment le créateur avec son login et son email, + la date de création, le login et la date de la dernière mise à jour. + Chaque fois que le fichier est enregistré sur le disque, les informations doivent être automatiquement mises à jour. 
+ + \end{itemize} + \newpage + + +%******************************************************************************% +% Macros et pre-processeur % +%******************************************************************************% + \section{Macros et Préprocesseur} + + \begin{itemize} + + \item Les constantes de préprocesseur (or \texttt{\#define}) que vous créez ne doivent + être utilisés que pour associer des valeurs littérales et constantes, et rien d’autre. + \item Les \texttt{\#define} érigés dans le but de contourner la norme et/ou obfusquer + du code interdit par la norme sont interdites. + Ce point doit être vérifiable par un humain. + \item Vous pouvez utiliser les macros présentes dans les bibliothèques standards, + si cette dernière est autorisée dans le projet ciblé. + \item Les macros multilignes sont interdites. + \item Seuls les noms de macros sont en majuscules. + \item Il faut indenter les caractères qui suivent un \texttt{\#if}, \texttt{\#ifdef} + ou \texttt{\#ifndef}. + + \end{itemize} + \newpage + + +%******************************************************************************% +% Choses interdites ! % +%******************************************************************************% + \section{Choses Interdites !} + + \begin{itemize} + + \item Vous n’avez pas le droit d’utiliser : + + \begin{itemize} + + \item \texttt{for} + \item \texttt{do...while} + \item \texttt{switch} + \item \texttt{case} + \item \texttt{goto} + + \end{itemize} + + \item Les opérateurs ternaires, comme \texttt{?}. + + \item Les tableaux à taille variable (VLA - Variable Length Array). + + \item Les types implicites dans les déclarations de variable. + + \end{itemize} + \begin{42ccode} + int main(int argc, char **argv) + { + int i; + char string[argc]; // Tableau a taille variable (VLA) + + i = argc > 5 ? 0 : 1 // Ternaire + } + \end{42ccode} + \newpage + +%******************************************************************************% +% Commentaires % +%******************************************************************************% + \section{Commentaires} + + \begin{itemize} + + \item Il ne doit pas y avoir de commentaires dans le corps des fonctions. + Les commentaires doivent se trouver à la fin d'une ligne ou sur leur propre ligne. + + \item Vos commentaires doivent être en anglais et utiles. + + \item Les commentaires ne peuvent pas justifier une fonction bâtarde. + + \end{itemize} + \newpage + + +%******************************************************************************% +% Les fichiers % +%******************************************************************************% + \section{Les fichiers} + + \begin{itemize} + + \item Vous ne pouvez pas inclure un \texttt{.c}. + + \item Vous ne pouvez pas avoir plus de 5 définitions de fonctions dans un \texttt{.c}. + + \end{itemize} + \newpage + + +%******************************************************************************% +% Makefile % +%******************************************************************************% + \section{Makefile} + + Les Makefile ne sont pas vérifiés pas La Norminette. + Ils doivent être vérifiés par un humain pendant l'évaluation. + \begin{itemize} + + \item Les règles \texttt{\$(NAME)}, \texttt{clean}, \texttt{fclean}, \texttt{re} et \texttt{all} + sont obligatoires. + + \item Le projet est considéré comme non fonctionnel si le Makefile "relink". 
+ + \item Dans le cas d’un projet multibinaire, en plus des règles précédentes, + vous devez avoir une règle \texttt{all} compilant les deux binaires ainsi qu’une règle + spécifique à chaque binaire compilé. + + \item Dans le cas d’un projet faisant appel à une bibliothèque de fonctions + (par exemple une \texttt{libft}), votre makefile doit compiler + automatiquement cette bibliothèque. + + \item Les sources nécessaires à la compilation de votre programme doivent être + explicitement citées dans votre Makefile. + + \end{itemize} + + + +\end{document} +%******************************************************************************% diff --git a/pdf/fr.norme.pdf b/pdf/fr.norme.pdf new file mode 100644 index 00000000..ef1d7879 Binary files /dev/null and b/pdf/fr.norme.pdf differ diff --git a/pdf/fr.norme.tex b/pdf/fr.norme.tex new file mode 100644 index 00000000..4d2cb141 --- /dev/null +++ b/pdf/fr.norme.tex @@ -0,0 +1,406 @@ +\documentclass{42-fr} +\newcommand\qdsh{\texttt{42sh}} + + + +%******************************************************************************% +% % +% Prologue % +% % +%******************************************************************************% + +\begin{document} +\title{La Norme} +\subtitle{Version 3} + +\summary +{ + Ce document décrit La Norme C en vigueur à 42. Une norme de programmation + définit un ensemble de règles régissant l’écriture d’un code. + La Norme s'applique par défaut à tous les projets C du + Cercle Intérieur, et à tout projet où elle est spécifiée. + +} + +\maketitle + +\tableofcontents + + + +%******************************************************************************% +% % +% Avant-propos % +% % +%******************************************************************************% +\chapter{Avant-propos} + + La Norminette est en Python et est open source.\\ + Vous pouvez en consulter les sources ici : https://github.com/42School/norminette.\\ + Les Pull Requests, suggestions et Issues sont les bienvenues ! + +%******************************************************************************% +% % +% Norme % +% % +%******************************************************************************% +\chapter{La Norme} + + +%******************************************************************************% +% Conventions de denomination % +%******************************************************************************% + \section{Conventions de dénomination} + + \begin{itemize} + + \item Un nom de structure doit commencer par + \texttt{s\_}. + + \item Un nom de typedef doit commencer par + \texttt{t\_}. + + \item Un nom d’union doit commencer par \texttt{u\_}. + + \item Un nom d’enum doit commencer par \texttt{e\_}. + + \item Un nom de globale doit commencer par \texttt{g\_}. + + \item Les noms de variables, de fonctions doivent être composés exclusivement de + minuscules, de chiffres et de '\_' (Unix Case). + + \item Les noms de fichiers et de répertoires doivent être composés exclusivement de + minuscules, de chiffres et de '\_' (Unix Case). + + \item Les caractères ne faisant pas partie de la table ASCII standard + ne sont pas autorisés. + + \item Les variables, fonctions, et tout autre identifiant doivent être en Snake Case. + ( En minuscules et en les séparant par des underscore ) + + \item Tous les identifiants (fonctions, macros, types, variables, etc) + doivent être en anglais. + + \item Les objets (variables, fonctions, macros, types, fichiers ou répertoires) + doivent avoir les noms les plus explicites ou mnémoniques possibles. 
+ + \item L'utilisation de variables globales qui ne sont pas \texttt{const} et \texttt{static} est interdite + dans un projet où ce n’est pas explicitement autorisé. + + \item Le fichier doit être compilable. Un fichier qui ne compile pas n'est + pas censé passer La Norme. + \end{itemize} +\newpage + +%******************************************************************************% +% Formatage % +%******************************************************************************% + \section{Formatage} + + \begin{itemize} + + \item Vous devez indenter votre code avec des tabulations de la taille de 4 espaces. + Ce n’est pas équivalent à 4 espaces, ce sont bien des tabulations. + + \item Chaque fonction doit faire au maximum 25 lignes + sans compter les accolades du bloc de la fonction. + + \item Chaque ligne ne peut pas faire plus de 80 colonnes, commentaires compris. + Une tabulation ne compte pas pour une colonne, mais bien pour les \texttt{n} espaces qu’elle représente. + + \item Chaque fonction doit être séparée par une ligne vide de la suivante. + Tout commentaire ou préprocesseur peut se trouver juste au-dessus + de la fonction. Le saut de ligne se trouve après la fonction précédente. + + \item Une seule instruction par ligne + + \item Une ligne vide doit être vide. Elle ne doit pas contenir d’espace ou de tabulation. + + \item Une ligne ne doit jamais se terminer par des espaces ou des tabulations. + + \item Vous ne pouvez pas avoir 2 espaces à la suite. + + \item Quand vous rencontrez une accolade, ouvrante ou fermante, + ou une fin de structure de contrôle, vous devez retourner à la ligne. + + \item Chaque virgule ou point-virgule doit être suivi d’un espace, sauf en fin de ligne. + + \item Chaque opérateur et opérande doivent être séparés par un seul espace. + + \item Chaque mot-clé en C doit être suivi d’un espace, sauf pour ceux + de type (comme \texttt{int}, \texttt{char}, \texttt{float}, etc.) ainsi que \texttt{sizeof}. + + \item Chaque déclaration de variable doit être indentée sur la même colonne. + + \item Les étoiles des pointeurs doivent être collées au nom de la variable. + + \item Une seule déclaration de variable par ligne + + \item On ne peut faire une déclaration et une initialisation sur une même ligne, + à l’exception des variables globales (quand elles sont permises) et des variables statiques. + + \item Les déclarations doivent être en début de fonction et doivent être séparées + de l’implémentation par une ligne vide. + + \item Aucune ligne vide ne doit être présente au milieu des déclarations ou de l’implémentation. + + \item La multiple assignation est interdite. + + \item Vous pouvez retourner à la ligne lors d’une même instruction ou structure de + contrôle, mais vous devez rajouter une indentation par accolade ou opérateur + d’affectation. Les opérateurs doivent être en début de ligne. + + \item Les structures de contrôle (\texttt{if}, \texttt{while}...) doivent avoir des accolades, à moins qu'elles + ne contiennent une seule ligne. + + \item Les accolades qui suivent les fonctions, les déclarateurs ou les structures de contrôle doivent être précédées et suivies d'une nouvelle ligne. 
+ + \end{itemize} + + Exemple: + \begin{42ccode} +int g_global; +typedef struct s_struct +{ + char *my_string; + int i; +} t_struct; +struct s_other_struct; + +int main(void) +{ + int i; + char c; + + return (i); +} + \end{42ccode} + \newpage + +%******************************************************************************% +% Parametres de fonction % +%******************************************************************************% + \section{Fonctions} + + \begin{itemize} + + \item Une fonction prend au maximum 4 paramètres nommés. + + \item Une fonction qui ne prend pas d’argument doit explicitement être prototypée + avec le mot \texttt{void} comme argument. + + \item Les paramètres des prototypes de fonctions doivent être nommés. + + \item Chaque définition de fonction doit être séparée par une ligne vide de la suivante. + + \item Vous ne pouvez déclarer que 5 variables par bloc au maximum. + + \item Le retour d’une fonction doit se faire entre parenthèses. + + \item Chaque fonction doit avoir une seule tabulation entre son type + de retour et son nom. + + \begin{42ccode} +int my_func(int arg1, char arg2, char *arg3) +{ + return (my_val); +} + +int func2(void) +{ + return ; +} + \end{42ccode} + + \end{itemize} + \newpage + + +%******************************************************************************% +% Typedef, struct, enum et union % +%******************************************************************************% + \section{Typedef, struct, enum et union} + + \begin{itemize} + + \item Vous devez mettre une tabulation lorsque vous déclarez une \texttt{struct}, \texttt{enum} ou \texttt{union}. + + \item Lors de la déclaration d’une variable de type \texttt{struct}, \texttt{enum} ou \texttt{union}, + vous ne mettrez qu’un espace dans le type. + + \item Lorsque vous déclarez une \texttt{struct}, \texttt{union} ou \texttt{enum} avec un \texttt{typedef}, + toutes les règles s’appliquent. + + \item Les noms de \texttt{typedef} doivent être précédés d'une tabulation. + + \item Vous devez indenter tous les noms de structures sur la même colonne. + + \item Vous ne pouvez pas déclarer une structure dans un fichier .c. + + \end{itemize} + \newpage + + +%******************************************************************************% +% Headers % +%******************************************************************************% + \section{Headers} + + \begin{itemize} + + \item Seuls les inclusions de headers (système ou non), + les déclarations, les \texttt{defines}, les prototypes et les macros + sont autorisés dans les fichiers headers. + + \item Tous les \texttt{includes} doivent se faire au début du fichier. + + \item Vous ne pouvez pas inclure de fichier C. + + \item On protègera les headers contre la double inclusion. Si le fichier est + \texttt{ft\_foo.h}, la macro témoin est \texttt{FT\_FOO\_H}. + + \item Une inclusion de header (.h) dont on ne se sert pas est interdite. + + \item Toute inclusion de header doit être justifiée autant dans un \texttt{.c} + que dans un \texttt{.h}. 
+
+    \end{itemize}
+
+    \begin{42ccode}
+#ifndef FT_HEADER_H
+# define FT_HEADER_H
+# include
+# include
+# define FOO "bar"
+
+int g_variable;
+struct s_struct;
+
+#endif
+    \end{42ccode}
+    \newpage
+
+%******************************************************************************%
+%                           Macros et pre-processeur                           %
+%******************************************************************************%
+    \section{Macros et Préprocesseur}
+
+    \begin{itemize}
+
+    \item Les constantes de préprocesseur (ou \texttt{\#define}) que vous créez ne doivent
+        être utilisées que pour associer des valeurs littérales et constantes, et rien d’autre.
+    \item Les \texttt{\#define} érigés dans le but de contourner la norme et/ou obfusquer
+        du code interdit par la norme sont interdits.
+        Ce point doit être vérifiable par un humain.
+    \item Vous pouvez utiliser les macros présentes dans les bibliothèques standards,
+        si ces dernières sont autorisées dans le projet ciblé.
+    \item Les macros multilignes sont interdites.
+    \item Seuls les noms de macros sont en majuscules.
+    \item Il faut indenter les caractères qui suivent un \texttt{\#if}, \texttt{\#ifdef}
+        ou \texttt{\#ifndef}.
+
+    \end{itemize}
+    \newpage
+
+
+%******************************************************************************%
+%                              Choses interdites !                             %
+%******************************************************************************%
+    \section{Choses Interdites !}
+
+    \begin{itemize}
+
+    \item Vous n’avez pas le droit d’utiliser :
+
+    \begin{itemize}
+
+    \item \texttt{for}
+    \item \texttt{do...while}
+    \item \texttt{switch}
+    \item \texttt{case}
+    \item \texttt{goto}
+
+    \end{itemize}
+
+    \item Les opérateurs ternaires, comme \texttt{?}.
+
+    \item Les tableaux à taille variable (VLA - Variable Length Array).
+
+    \item Les types implicites dans les déclarations de variable.
+
+    \end{itemize}
+    \begin{42ccode}
+    int main(int argc, char **argv)
+    {
+        int i;
+        char string[argc]; // Tableau a taille variable (VLA)
+
+        i = argc > 5 ? 0 : 1 // Ternaire
+    }
+    \end{42ccode}
+    \newpage
+
+%******************************************************************************%
+%                                 Commentaires                                 %
+%******************************************************************************%
+    \section{Commentaires}
+
+    \begin{itemize}
+
+    \item Il ne doit pas y avoir de commentaires dans le corps des fonctions.
+        Les commentaires doivent se trouver à la fin d'une ligne ou sur leur propre ligne.
+
+    \item Vos commentaires doivent être en anglais et utiles.
+
+    \item Les commentaires ne peuvent pas justifier une fonction bâtarde.
+
+    \end{itemize}
+    \newpage
+
+
+%******************************************************************************%
+%                                 Les fichiers                                 %
+%******************************************************************************%
+    \section{Les fichiers}
+
+    \begin{itemize}
+
+    \item Vous ne pouvez pas inclure un \texttt{.c}.
+
+    \item Vous ne pouvez pas avoir plus de 5 définitions de fonctions dans un \texttt{.c}.
+
+    \end{itemize}
+    \newpage
+
+
+%******************************************************************************%
+%                                   Makefile                                   %
+%******************************************************************************%
+    \section{Makefile}
+
+    Les Makefile ne sont pas vérifiés par La Norminette.
+    Ils doivent être vérifiés par un humain pendant l'évaluation.
+    \begin{itemize}
+
+    \item Les règles \texttt{\$(NAME)}, \texttt{clean}, \texttt{fclean}, \texttt{re} et \texttt{all}
+        sont obligatoires.
+
+    \item Le projet est considéré comme non fonctionnel si le Makefile "relink".
+ + \item Dans le cas d’un projet multibinaire, en plus des règles précédentes, + vous devez avoir une règle \texttt{all} compilant les deux binaires ainsi qu’une règle + spécifique à chaque binaire compilé. + + \item Dans le cas d’un projet faisant appel à une bibliothèque de fonctions + (par exemple une \texttt{libft}), votre makefile doit compiler + automatiquement cette bibliothèque. + + \item Les sources nécessaires à la compilation de votre programme doivent être + explicitement citées dans votre Makefile. + + \end{itemize} + + + +\end{document} +%******************************************************************************% diff --git a/pdf/ja.norm.pdf b/pdf/ja.norm.pdf new file mode 100644 index 00000000..812a64bd Binary files /dev/null and b/pdf/ja.norm.pdf differ diff --git a/pdf/ja.norm.pdf.version b/pdf/ja.norm.pdf.version new file mode 100644 index 00000000..8a36cd14 --- /dev/null +++ b/pdf/ja.norm.pdf.version @@ -0,0 +1 @@ +4.1 \ No newline at end of file diff --git a/pdf/ja.norm.tex b/pdf/ja.norm.tex new file mode 100644 index 00000000..cf5b3cc7 --- /dev/null +++ b/pdf/ja.norm.tex @@ -0,0 +1,521 @@ +\documentclass{42-ja} +\newcommand\qdsh{\texttt{42sh}} + +%******************************************************************************% +% % +% 序章 % +% % +%******************************************************************************% + +\begin{document} +% \begin{CJK}{UTF8}{ipxg} +\title{The Norm} +\subtitle{Version 4.1} + +\summary +{ + このドキュメントは、コーディングの際に従うべき一連の規則を定義した、42 において適用される + プログラミング規格 (Norm) について説明したものである。 + Norm は、Common Core 内のすべての C言語プロジェクトには無条件に適用され、このほか Norm + が適用される旨の記載があれば、それらのプロジェクトにも適用される。 +} + +\maketitle + +\tableofcontents + + + +%******************************************************************************% +% % +% 前書き % +% % +%******************************************************************************% +\chapter{前書き} + +\texttt{norminette} は、ソースコードが Norm を遵守しているかを判定するためのオープンソース + ソフトウェアな Python プログラムである。このプログラムは、Norm で規定した多くの制約について + 検査するが、全部ではない(例えば主観が関わるもの)。キャンパス固有に規則を改変していない限りは、 + レビューにおいて \texttt{norminette} が優先される。 以下のページでは、\texttt{norminette} + では確認できない検査項目を \textit{(*)}印で示しており、レビュワーがコードレビュー中に発見した + 場合には、そのプロジェクトは不合格になり得る( Norme フラグを使用する)。\\ + +リポジトリは \texttt{https://github.com/42School/norminette} から利用可能である。\\ + +プルリクエスト、提案、バグ報告も受け付けている。 + +\newpage + + +%******************************************************************************% +% +% 教育学観点での説明 % +% +%******************************************************************************% + \chapter{目的} + + Norm は教育上の多くの要求を満たすために丁寧に作られている。すべての理由の中で最も重要なものを + 以下に示す。: + \begin{itemize} + + \item 優先順位づけ:コーディングとは、大きくて複雑なタスクを、一連の単純な処理に分割する + ことを意味する。 これらすべての処理は次から次へと一つずつ順次実行される。ソフトウェア作りを + 始めたばかりの初心者は、すべの個別の処理と正確な実行順序を完全に理解しながら、単純かつ明快 + なアーキテクチャを必要とする。複数の処理をあたかも同時に行うような難解な構文のコードは理解 + しづらく、ソースコードの同じ区画に複数のタスクを割り当てるような関数はエラーの原因と + なりうる。\\ + Norm は、コード各部分に固有のタスクが明確に理解及び検証でき、実行される一連の処理に疑いの + 余地を残さないような、複雑でないコードを作成することを求めている。 + そのため、関数内の行数を最大 25 行に制限しており、\texttt{for} や \texttt{do...while}、 + 三項演算子の使用を禁止している。 + + \item 見た目と印象(スタイル):普段のピア学習やピアレビューにおいて友人や同僚とコードを + 共有する際、コードを解読することに時間を費すよりも、コードのロジックについて直接議論できた方が + 良い。\\ + Norm は、関数や変数の命名、インデント、括弧の規則、様々な場所におけるタブや空白、その他 + に関して、特定のスタイルを使用することを求めている。これにより、他の人のコードをスムーズに + 確認でき、理解の前段階のコードを読むことに時間を費すことなく、直接要点をつかむことができる。 + Norm はまた、トレードマークとしても機能する。あなたがいずれ労働市場に出た際に、42コミュニティ + の一員として、他の 42 学生や 42 出身者が書いたコードを認識できるようになる。 + + \item 長期的な視点:理解しやすいコードを書く努力をすることは、それを改修するための最善の方法 + である。あなたを含め誰かがバグ修正や機能追加をしなければならない際に、以前に正しい方法で + 物事を行っていれば、何をしているのかを理解するために貴重な時間を失うことはない。これにより、 
+ 時間がかかるという理由だけでコードの保守が停止するような状況を避けることができ、市場で成功 + する製品を持つことにおいて、違いを生む。このことを早く学ぶほど良い。 + + \item 参考文献:Norm に含まれる規則の一部またはすべてが恣意的だと思うかも知れないが、何を + すべきか、どのようにすべきかについて私たちは実際に考え、勉強を重ねてきた。何故関数が短く単一 + の処理のみを行うべきなのか、何故変数名が意味を持つべきなのか、何故各行を 80 文字以下に + すべきか、何故関数が取る引数の数を抑えるべきなのか、何故コメントが有用であるべきかなどに + ついて、Google で検索することを強く推奨する。 + + \end{itemize} + + +\newpage + +%******************************************************************************% +% % +% Norm % +% % +%******************************************************************************% +\chapter{The Norm} + + +%******************************************************************************% +% 命名規則 % +%******************************************************************************% + \section{命名規則} + + \begin{itemize} + + \item 構造体 (struct) 名の先頭は \texttt{s\_} とする。 + + \item typedef 名の先頭は \texttt{t\_} とする。 + + \item 共用体 (union) 名の先頭は \texttt{u\_} とする。 + + \item 列挙型 (enum) 名の先頭は \texttt{e\_} とする。 + + \item グローバル変数名の先頭は \texttt{g\_} とする。 + + \item 変数、関数名、ユーザー定義型などの識別子は、小文字、数字、アンダースコア + のみを含むものとする(スネークケース)。大文字は使用しない。 + + \item ファイル名とディレクトリ名は、小文字、数字およびアンダースコアのみを + 含むものとする(スネークケース)。 + + \item ASCII コード表に含まれない文字は、リテラル文字列を除き使用不可とする。 + + \item \textit{(*)} すべての識別子(関数、型、変数など)の名前は、英語として + 読めるものかつ明示的又は覚えやすいものとし、各単語はアンダースコアで区切るもの + とする。この規則は、はマクロ、ファイル名、ディレクトリにも適用される。 + + \item \texttt{const} または \texttt{static} でマークされていないグローバル + 変数の使用は禁止とし、Norm エラーとみなされる(ただし、プロジェクトで明示的に + 許可されている場合は除く) + + \item ファイルはコンパイル可能である必要がある。コンパイルできないファイルは + Norm の基準に不適合とみなされる。 + + \end{itemize} +\newpage + +%******************************************************************************% +% フォーマット % +%******************************************************************************% + \section{フォーマット} + + \begin{itemize} + + \item 各関数は、関数自身の波括弧記号の行を除き、最大 25 行とする。 + + \item 各行は、コメントを含めて最大 80 文字とする。但し、水平タブ記号は1文字として + カウントされるのではなく、それが表す半角スペースの文字数分がカウントされる。 + + \item 関数の前後は空行で区切る。関数間にコメントやプリプロセッサ命令を挿入する + ことは許容されるが、少なくとも1つの空行をそれらの間に挿入するものとする。 + + \item コードは、4文字分の長さの水平タブ記号(ASCIIコード表で \textit{0x09} + にマップされる)でインデントする。これは4つの半角空白記号 (\textit{0x20} に + マップされる) とは異なる。\texttt{norminette} によって検証される適切な + インデントを視覚的に得るために、コードエディタが正しく設定されているか確認する + こと。 + + \item 波括弧内のブロックはインデントする。波括弧はそれ単独の行とする。ただし、 + struct、enum、union の宣言部分は除く。 + + \item 空行は文字を含んではならない。即ち、空白記号やタブ記号を含んではならない。 + + \item 行末が空白記号や水平タブ記号であってはならない。 + + \item 空行は連続してはならない。 + 空白記号は連続してはならない。 + + \item 宣言は関数の冒頭に配置する。 + + \item 変数名は、そのスコープ内ですべて同じ列にインデントする。 + 注意:型名は、それが含まれるブロックごとに既にインデントされた状態である。 + + \item ポインタに付随するアスタリスク記号は、その変数名に隣接させる。 + 即ち、アスタリスク記号と変数名の間に空白記号または水平タブ記号を挿入してはならない。 + + \item 変数宣言は、一行につき1つの変数とする。 + + \item 宣言と初期化を同一行内で行ってはならない。 + ただし、グローバル変数(許可されている場合)、静的変数、定数を除く。 + + \item 関数内の変数宣言と関数の残りの部分との間には、空行を1行挿入する。 + 関数内に他の空行を入れてはならない。 + + \item 一行につき1つの命令または制御構造 (\texttt{if} や \texttt{while}) のみ + が許可される。 + 例えば、制御構造内での代入は禁止とし、同一行内での2つ以上の代入も禁止とし、 + 制御構造の終端は必ず改行する。 + + \item 命令または制御構造を、必要に応じて複数行に分割しても良い。 + 追加される行は最初の行と比較してインデントされ、演算子の前後で改行する場合は + 演算子を前の行の末尾ではなく新しい行の先頭に配置する。 + + \item 行末を除き、各カンマ記号またはセミコロン記号の後には空白記号が必要である。 + + \item 各演算子または被演算子は、空白記号で区切られるものとする。 + + \item 各 C キーワードの後には単一の空白記号を挿入する。 + ただし、型名(\texttt{int}、\texttt{char}、\texttt{float}等)および \texttt{sizeof} 演算子を除く。 + + \item 制御構造(\texttt{if} や \texttt{while})内は、単一行に単一の命令を含む場合を除き、 + 波括弧で囲むものとする。 + + \end{itemize} + +\vspace{1cm} + + 一般的な例: + \begin{42ccode} +int g_global; +typedef struct s_struct +{ + char *my_string; + int i; +} t_struct +struct s_other_struct; + +int main(void) +{ + int i; + char c; + + return (i); +} + \end{42ccode} + \newpage + 
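+
+	なお、命令や制御構造を複数行に分割する際の書き方について、上記の規則に基づく参考例を以下に示す
+	(この例は規格本文の一部ではなく、説明のために追加した一例であり、関数名などは仮のものである)。
+
+	\begin{42ccode}
+/* Illustrative example only (not part of the official Norm text). */
+int	is_in_range(int value, int min, int max)
+{
+	if (value >= min
+		&& value <= max)
+		return (1);
+	return (0);
+}
+	\end{42ccode}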
+%******************************************************************************% +% 関数のパラメータ % +%******************************************************************************% + \section{関数のパラメータ} + + \begin{itemize} + + \item 関数が引数を取る場合、最大4つの名前付きパラメータとする。 + + \item 関数が引数を取らない場合、プロトタイプの引数に \texttt{void} キーワードを + 明示的に記述する。 + + \item 関数のプロトタイプのパラメータには名前が必要である。 + + \item 各関数内に宣言可能な変数は、最大5つとする。 + + \item 関数の \texttt{return} 文は、戻り値を丸括弧で囲むものとする。ただし、 + 戻り値を返さない関数を除く。 + + \item 関数のプロトタイプでは、その戻り値の型と関数名の間に単一の水平タブ記号を + 挿入する。 + + \end{itemize} + +\vspace{1cm} + + \begin{42ccode} +int my_func(int arg1, char arg2, char *arg3) +{ + return (my_val); +} + +int func2(void) +{ + return ; +} + \end{42ccode} + + \newpage + + +%******************************************************************************% +% typedef, struct, enum and union % +%******************************************************************************% +\section{typedef、構造体 (struct)、列挙体 (enum)、共用体 (union)} + +\begin{itemize} + +\item 他の C キーワードと同様、struct 宣言する際は \texttt{struct} と名前の間に単一の半角 + 空白記号を挿入する。enum および union においても同様とする。 + +\item struct 型の変数を宣言する際は、通常の型の変数と同様にインデントする。enum および union + においても同様とする。 + +\item struct、enum、union の波括弧内では、他のブロックと同様に通常のインデントを適用する。 + +\item 他の C キーワードと同様、\texttt{typedef} の後に単一の半角空白記号を挿入し、新しく + 定義された名前に通常のインデントを適用する。 + +\item すべての構造体の名前は、そのスコープ内で同じ列にインデントする。 + +\item .c ファイル内で、構造体を宣言してはならない。 + +\end{itemize} +\newpage + + + +%******************************************************************************% +% ヘッダーファイル % +%******************************************************************************% +\section{ヘッダーファイル} + +\begin{itemize} + + \item \textit{(*)} ヘッダーファイルで許可される要素: + ヘッダーファイルのインクルード(システムまたはそれ以外)、宣言、プリプロセッサ命令、 + プロトタイプ、マクロ。 + + \item インクルードはすべてファイルの先頭で行うものとする。 + + \item ヘッダーファイルまたは .c ファイル内で、他の .c ファイルをインクルードしてはならない。 + + \item ヘッダーファイルをインクルードする際は、必ず二重インクルードから保護する。 + ファイル名が \texttt{ft\_foo.h} の場合、そのインクルードガード用のマクロ名は + \texttt{FT\_FOO\_H} である。 + + \item \textit{(*)} 未使用のヘッダーファイルをインクルードしてはならない。 + + \item ヘッダーファイルのインクルードは、.c ファイルと .h ファイル自体でコメントを使用して + 正当化することができる。 + +\end{itemize} + +\vspace{1cm} + + \begin{42ccode} +#ifndef FT_HEADER_H +# define FT_HEADER_H +# include +# include +# define FOO "bar" + +int g_variable; +struct s_struct; + +#endif + \end{42ccode} + \newpage + + +%******************************************************************************% +% 42ヘッダー % +%******************************************************************************% + + \section{42ヘッダー - スタイルのあるファイルの開始} + + \begin{itemize} + + \item すべての .c と .h ファイルの先頭には、42ヘッダーを配置する(有用な情報を含む + 特別な形式の複数行コメント)。42ヘッダーは標準でクラスターの PC の様々なテキスト + エディタ(emacs: \texttt{C-c C-h} を入力、vim: \texttt{:Stdheader} または + \texttt{F1} を入力、など)で利用可能である。 + + \item \textit{(*)} 42ヘッダーには、作成者(login 名と学生メール + (\textit{login@student.42tokyo.jp}))、作成および最終更新の login 名と日時を + 含む情報を含めるものとする。 + ファイルがディスクに保存される度に、情報が自動的に更新されるべきである。 + + \end{itemize} + \info{ + デフォルトの 42ヘッダーは、自動的にはあなたの個人情報が設定されない可能性がある。 + 上記の規則に従うためには、設定する必要があるかも知れない。 + } + + \newpage + + +%******************************************************************************% +% マクロとプリプロセッサ % +%******************************************************************************% + \section{マクロとプリプロセッサ} + + \begin{itemize} + + \item \textit{(*)} プリプロセッサ定数(または \texttt{\#define})の作成は、 + リテラルまたは定数値のために使用するもののみとする。 + + \item \textit{(*)} Norm を回避、またはコードを難読化する目的で + \texttt{\#define} を用いてはならない。 + + \item \textit{(*)} 標準ライブラリから利用可能なマクロは、それらが + プロジェクトで許可されている範囲でのみ使用しても良い。 + + \item 複数行にわたるマクロを記述してはならない。 + + \item マクロ名はすべて大文字とする。 + 
+ \item \texttt{\#if}、\texttt{\#ifdef}、または \texttt{\#ifndef} + ブロック内のプリプロセッサ命令はインデントする。 + + \item グローバルスコープの外では、プリプロセッサ命令を使用してはならない。 + + \end{itemize} + \newpage + + +%******************************************************************************% +% ! 禁止事項 ! % +%******************************************************************************% + \section{禁止事項!} + + \begin{itemize} + + \item 下記を使用してはならない: + + \begin{itemize} + + \item \texttt{for} + \item \texttt{do...while} + \item \texttt{switch} + \item \texttt{case} + \item \texttt{goto} + + \end{itemize} + + \item '?' のような三項演算子 + + \item 可変長配列 (VLA) + + \item 変数宣言における暗黙的な型 + + \end{itemize} + +\vspace{1cm} + +\begin{42ccode} + int main(int argc, char **argv) + { + int i; + char str[argc]; // This is a VLA + + i = argc > 5 ? 0 : 1 // Ternary + } + \end{42ccode} + \newpage + +%******************************************************************************% +% コメント % +%******************************************************************************% + \section{コメント} + + \begin{itemize} + + \item コメントは、関数内に配置してはならない。 + コメントは、行末に、またはコメント単独の行として配置するものとする。 + + \item \textit{(*)} コメントは英語で記述し、有用である必要がある。 + + \item \textit{(*)} コメントは、キャリーオールまたは悪い関数の作成を正当化する手段 + にはならない。 + + \end{itemize} + + \warn{ + 一般的にキャリーオールまたは悪い関数は、関数名においては f1、f2... のような、 + 変数名においては a、b、c... のような明示的でない名前を伴う。 + 特異的で論理的な理由なく、 Norm の回避のみを目的とした関数も、悪い関数とみなされる。 + それぞれが明確で単純なタスクを達成する、明確で読みやすい関数を持ったコードが + 望ましいことを心に留めて欲しい。ワンライナーのようなコード難読化テクニックは + 避けること。 + } + \newpage + + +%******************************************************************************% +% ファイル % +%******************************************************************************% + \section{ファイル} + + \begin{itemize} + + \item .c ファイルで .c ファイルをインクルードしてはならない。 + + \item 一つの .c ファイルに含めることができる関数定義は、最大5つとする。 + + \end{itemize} + \newpage + + +%******************************************************************************% +% Makefile % +%******************************************************************************% + \section{Makefile} + + Makefile は \texttt{norminette} によって検査されず、評価ガイドラインで + 要求された場合、レビュー中にレビュワーが直接確認する必要がある。特に指示がない限り、 + 下記の規則が Makefile に適用される: + \begin{itemize} + + \item \textit{\$(NAME)}、\textit{clean}、\textit{fclean}、\textit{re}、 + \textit{all} ルールを必須とする。\textit{all} ルールをデフォルトの + ルールとし、単に \texttt{make} と入力したときに \textit{all} ルールが + 実行される必要がある。 + + \item 再コンパイルまたは再リンク不要にもかかわらず Makefile が再リンクする + 場合、そのプロジェクトは要件を満たしていないとみなされる。 + + \item マルチバイナリプロジェクトの場合、上記に加えて各バイナリのルール + (例:\textit{\$(NAME\_1)}、\textit{\$(NAME\_2)}、...)も作成する。 + \textit{all} ルールは、各バイナリルールを使用してすべてのバイナリを + コンパイルする。 + + \item 非システムライブラリ(例:\texttt{libft})がソースコードと共に存在し、 + そのライブラリから関数を呼び出すプロジェクトの場合、Makefile はこの + ライブラリも含めて自動的にコンパイルする必要がある。 + + \item プロジェクトをコンパイルするために必要なすべてのソースファイルについて、 + Makefile に名前を明示的に列挙するものとする。即ち、\texttt{*.c}、 + \texttt{*.o} 等で記述してはならない。 + + \end{itemize} + + +% \end{CJK} +\end{document} +%******************************************************************************% diff --git a/pdf/ko.norm.pdf b/pdf/ko.norm.pdf new file mode 100644 index 00000000..a807d9e0 Binary files /dev/null and b/pdf/ko.norm.pdf differ diff --git a/pdf/ko.norm.pdf.version b/pdf/ko.norm.pdf.version new file mode 100644 index 00000000..56a6051c --- /dev/null +++ b/pdf/ko.norm.pdf.version @@ -0,0 +1 @@ +1 \ No newline at end of file diff --git a/pdf/ko.norm.tex b/pdf/ko.norm.tex new file mode 100644 index 00000000..3fc362e8 --- /dev/null +++ b/pdf/ko.norm.tex @@ -0,0 +1,475 @@ +\documentclass{42-ko} + + + 
+%******************************************************************************% +% % +% 프롤로그 % +% % +%******************************************************************************% + +\begin{document} +\title{The Norm} +\subtitle{Version 3} + +\summary +{ + 본 문서는 42에서 적용 가능한 표준(Norm)을 설명합니다. + 프로그래밍 표준은 코드를 작성할 때에 따라야하는 규칙들을 정의합니다. + Norm은 기본적으로 커먼 코어 내의 모든 C 프로젝트와 + 지정된 모든 프로젝트에 적용됩니다. +} + +\maketitle + +\tableofcontents + + + +%******************************************************************************% +% % +% 머리말 % +% % +%******************************************************************************% +\chapter{머리말} + + \texttt{norminette}은 파이썬으로 작성되었으며 오픈 소스입니다. \\ + 리포지터리는 다음 주소에서 확인할 수 있습니다. https://github.com/42School/norminette \\ + 풀 리퀘스트, 제안과 이슈를 환영합니다! + + \newpage + + + %******************************************************************************% + % + % 교육적 설명 % + % + %******************************************************************************% + \chapter{Why ?} + + Norm은 많은 교육학적 요구를 충족시키기 위해 신중하게 만들어졌습니다. 아래의 + 모든 선택에 대한 이유는 다음과 같습니다. + \begin{itemize} + + \item 시퀀싱: 코딩은 크고 복잡한 작업을 긴 일련의 기본 명령으로 나누는 것을 의미합니다. + 이 모든 명령은 차례대로 실행될 것입니다. 소프트웨어를 만들기 시작하는 초보자는 프로젝트를 위해 + 모든 개별 명령과 정확한 실행 순서를 완전히 이해한 단순하고 명확한 아키텍처가 필요합니다. + 동시에 여러 명령을 수행하는 암호 언어 구문은 분명히 혼란스러우며 코드의 동일한 부분에 혼합된 + 여러 작업을 처리하려는 함수는 오류의 원인입니다.\\ + The Norm은 각 조각의 고유한 작업을 명확하게 이해하고 검증할 수 있고, 실행된 모든 명령의 순서가 + 의심의 여지가 없는 곳에 간단한 코드 조각을 만들 것을 요구합니다. 이것이 우리가 함수에서 + 최대 25줄을 요구하는 이유이고, 또한 \texttt{for}, \texttt{do .. while}, 또는 삼항 연산자를 + 금지하는 이유입니다. + + \item 룩 앤드 필: 일반적인 동료 학습 과정과 동료 평가 중에 친구 및 동료와 교환하는 동안 + 코드를 해독하는 데 시간을 쓰지 않고 코드 조각의 논리에 대해 직접 이야기하고 싶어 합니다.\\ + The Norm은 많은 곳에서 함수와 변수의 이름 지정, 들여쓰기, 중괄호 규칙, 탭 및 띄어쓰기에 + 대한 지침을 제공하는 특정 룩 앤드 필을 사용하도록 요구합니다. 이것은 익숙해 보이는 + 다른 사람의 코드를 부드럽게 살펴볼 수 있으며 코드를 이해하기 전에 코드를 읽는 데 시간을 쓸 필요 + 없이 바로 요점을 파악할 수 있게 합니다. The Norm은 또한 트레이드마크입니다. 42 커뮤니티의 + 일원으로서, 노동시장에 있을 때 다른 42학생이나 졸업생들이 작성한 코드를 알아볼 수 있을 것입니다. + + \item 장기 비전: 이해할 수 있는 코드를 작성하려는 노력은 그것을 유지하는 가장 좋은 방법입니다. + 당신을 포함한 다른 누군가가 버그를 수정하거나 새로운 기능을 추가해야 할 때마다 이전에 올바른 + 방식으로 작업을 수행했다면 그것이 무엇을 하는지 알아내려고 귀중한 시간을 낭비할 필요가 없습니다. + 이렇게 하면 시간이 오래 걸린다는 이유만으로 코드 조각들이 더 이상 유지되지 않는 상황을 피할 수 있고, + 시장에서 성공적인 제품을 가지고 있는 것에 대해 말할 때 차이를 만들 수 있습니다. 그렇게 하는 법은 + 빨리 배울수록 좋습니다. + + \item 표준: 당신은 어떤 규칙이나 모든 규칙이 임의적이라고 생각할 수도 있지만, 우리는 실제로 무엇을 어떻게 + 해야 할지 생각하고 읽었습니다. 우리는 왜 함수가 짧고 한 가지 일만 해야 하는지, 변수 이름이 의미가 있어야 + 하는지, 줄 너비가 80열을 넘지 않아야 하는지, 함수가 많은 매개 변수를 사용하지 않아야 하는지, 주석이 + 유용해야 하는 이유 등을 구글에서 검색하는 것을 강력히 권장합니다. + + \end{itemize} + + + \newpage + +%******************************************************************************% +% % +% The Norm % +% % +%******************************************************************************% +\chapter{The Norm} + + +%******************************************************************************% +% 명명 규칙 % +%******************************************************************************% + \section{명명} + + \begin{itemize} + + \item 구조체의 이름은 \texttt{s\_} 로 시작해야만 합니다. + + \item typedef의 이름은 \texttt{t\_} 로 시작해야만 합니다. + + \item 공용체(union)의 이름은 \texttt{u\_} 로 시작해야만 합니다. + + \item 열거형(enum)의 이름은 \texttt{e\_} 로 시작해야만 합니다. + + \item 전역 변수의 이름은 \texttt{g\_} 로 시작해야만 합니다. + + \item 변수와 함수의 이름에는 소문자, 숫자 및 + '\_' (Unix Case)만이 포함될 수 있습니다. + + \item 파일 및 디렉터리의 이름에는 소문자, 숫자 및 + '\_' (Unix Case)만이 포함될 수 있습니다. + + \item 표준 ASCII 코드표에 없는 문자는 금지됩니다. + + \item 변수, 함수 및 기타 식별자는 스네이크 케이스를 사용해야 합니다. + 대문자는 없고 각 단어는 밑줄 문자로 구분됩니다. + + \item 모든 식별자(함수, 매크로, 자료형, 변수 등)는 영어여야만 합니다. 
+ + \item 객체(변수, 함수, 매크로, 자료형, 파일 또는 디렉터리)는 + 가능한 가장 명시적이거나 가장 연상되는 이름을 가져야 합니다. + + \item 프로젝트에서 명시적으로 허용하지 않는 한 + 상수(const) 및 정적(static)이 아닌 전역 변수 선언은 금지되며 + Norm 오류로 간주됩니다. + + \item 파일은 컴파일이 가능해야 합니다. 컴파일되지 않는 파일은 + Norm을 통과할 수 없을 것입니다. + \end{itemize} +\newpage + +%******************************************************************************% +% 서식 % +%******************************************************************************% + \section{서식} + + \begin{itemize} + + \item 들여쓰기는 네 칸 크기의 탭으로 이루어져야 합니다. + 일반적인 공백 네 칸이 아니라 진짜 탭을 말합니다. + + \item 각 함수는 함수 자체의 중괄호를 제외하고 + 최대 25줄이어야 합니다. + + \item 각 줄은 주석을 포함해 최대 80자의 열 너비를 가집니다. + 주의: 탭 들여쓰기는 한 열로 계산하지 않으며, + 탭이 해당되는 공백의 수 만큼으로 계산됩니다. + + \item 각 함수는 줄 바꿈으로 구분해야 합니다. + 모든 주석과 전처리기 명령은 함수 바로 위에 있을 수 있습니다. + 줄 바꿈은 이전 함수 다음에 와야 합니다. + + \item 한 줄에 한 명령만이 존재할 수 있습니다. + + \item 빈 줄은 공백이나 탭 들여쓰기 없이 비어 있어야 합니다. + + \item 줄은 공백이나 탭 들여쓰기로 끝날 수 없습니다. + + \item 두 개의 연속된 공백이 있을 수 없습니다. + + \item 모든 중괄호나 제어 구조 뒤는 줄바꿈으로 시작돼야 합니다. + + \item 줄의 끝이 아니라면 모든 콤마와 세미콜론 뒤에는 공백 문자가 + 따라와야 합니다. + + \item 모든 연산자나 피연산자는 하나의 공백으로 구분해야 합니다. + + \item 각 C 키워드 뒤에는 공백이 있어야만 합니다. + 자료형 키워드(int, char, float, 등)와 sizeof는 제외됩니다. + + \item 각 변수 선언은 해당 스코프와 같은 열로 들여쓰기 되어야만 합니다. + + \item 포인터와 함께 쓰이는 별표는 변수 이름에 붙어있어야만 합니다. + + \item 한 줄에 한 개의 변수 선언만이 가능합니다. + + \item 선언과 초기화는 같은 줄에서 작성될 수 없습니다. + 다음 경우에는 제외됩니다. + 전역 변수(허용 될때에), 정적 변수, 그리고 상수. + + \item 선언문은 함수의 처음에 존재해야 합니다. + + \item 함수 내의 변수 선언문과 이후 함수 사이에는 빈 줄이 + 존재해야만 합니다. 다른 빈 줄은 함수 내에서 허용되지 않습니다. + + \item 다중 대입은 엄격하게 금지됩니다. + + \item 명령문이나 제어 구조 다음에 새 줄을 추가할 수도 있습니다. + 그러기 위해서는 들여쓰기와 함께 중괄호나 대입 연산자를 + 추가해야 합니다. 연산자는 줄의 시작에 있어야만 합니다. + + \item 제어문(if, while..)에는 한 줄인 경우를 제외하고 + 중괄호가 존재해야 합니다 + + \item 함수, 선언문, 제어 구조 다음에 오는 중괄호 앞 뒤에는 + 줄바꿈이 있어야만 합니다. + + \end{itemize} + + \newpage + + 일반적인 예시: + \begin{42ccode} +int g_global; +typedef struct s_struct +{ + char *my_string; + int i; +} t_struct; +struct s_other_struct; + +int main(void) +{ + int i; + char c; + + return (i); +} + \end{42ccode} + \newpage + +%******************************************************************************% +% 함수 매개변수 % +%******************************************************************************% + \section{함수} + + \begin{itemize} + + \item 한 함수에는 최대 4개의 명명된 매개변수를 가질 수 있습니다. + + \item 인자를 받지 않는 함수는 "void"라는 단어를 인자로 + 명시적으로 프로토타입 돼야 합니다. + + \item 함수 프로토타입 안의 매개 변수는 명명되어야만 합니다. + + \item 각 함수는 빈 줄로 다음 함수와 구분되어야 합니다. + + \item 각 함수에서 5개를 초과하여 변수를 선언할 수 없습니다. + + \item 함수의 리턴은 괄호 사이에 있어야 합니다. + + \item 각 함수의 리턴 자료형과 함수 이름 사이에는 + 한 번의 탭 들여쓰기가 있어야 합니다. + + \begin{42ccode} +int my_func(int arg1, char arg2, char *arg3) +{ + return (my_val); +} + +int func2(void) +{ + return ; +} + \end{42ccode} + + \end{itemize} + \newpage + + +%******************************************************************************% +% 자료형, 구조체, 열거형(enum)과 공용체(union) % +%******************************************************************************% + \section{자료형, 구조체, 열거형(enum)과 공용체(union)} + + \begin{itemize} + + \item 구조체, 열거형, 공용체를 선언 할 때는 탭 들여쓰기를 넣습니다. + + \item 구조체, 열거형, 공용체의 변수를 선언할 때에는 + 자료형 안에 공백 문자 하나를 넣습니다. + + \item 구조체, 열거형, 공용체를 typedef와 함께 선언할 때에는 + 모든 들여쓰기 규칙이 적용됩니다. + + \item typedef 이름은 탭이 앞에 있어야만 합니다. + + \item 모든 구조체의 이름은 해당 스코프의 같은 열에 들여쓰기 해야만 합니다. + + \item .c 파일 내에서 구조체를 선언할 수 없습니다. 
+ + \end{itemize} + \newpage + + +%******************************************************************************% +% 헤더 % +%******************************************************************************% + \section{헤더 - a.k.a 파일 포함하기} + + \begin{itemize} + + \item 헤더파일에서 허용되는 것들: + 헤더 인클루드(시스템 헤더 또는 유저 헤더), + 선언문, defines, 프로토타입과 매크로. + + \item 모든 인클루드는 파일의 시작에 작성되어야 합니다. + + \item C 파일을 포함할 수 없습니다. + + \item 헤더 파일은 중복 인클루드를 방지해야만 합니다. + 만약 파일 이름이 ft\_foo.h라면 인클루드 가드 매크로 이름은 + FT\_FOO\_H 가 되어야 합니다. + + \item 사용하지 않은 헤더의 인클루드는 금지됩니다. + + \item .c / .h 파일의 모든 헤더 인클루드는 정당한 이유가 있어야만 합니다. + + \end{itemize} + + \begin{42ccode} +#ifndef FT_HEADER_H +# define FT_HEADER_H +# include +# include +# define FOO "bar" + +int g_variable; +struct s_struct; + +#endif + \end{42ccode} + \newpage + + +%******************************************************************************% +% 42 헤더 % +%******************************************************************************% + + \section{42 헤더 - a.k.a 멋지게 파일 시작하기} + + \begin{itemize} + + \item 모든 .c 및 .h 파일은 즉시 표준 42 헤더로 시작해야 합니다. : 유용한 정보를 + 포함하는 특별한 형식의 멀티 라인 주석. 표준 헤더는 다양한 텍스트 편집기를 위한 + 클러스터의 컴퓨터에서 자연스럽게 사용할 수 있습니다. (emacs : \texttt{C-c C-h} 입력, + vim : \texttt{:Stdheader} 또는 \texttt{F1}, 등... 입력). + + \item 42 헤더에는 로그인과 이메일이 포함된 작성자, 생성일, 로그인 및 마지막 업데이트 + 날짜를 포함한 여러 최신 정보가 포함되어야 합니다. 파일이 디스크에 저장될 때마다 + 정보가 자동으로 업데이트되어야 합니다. + + \end{itemize} + \newpage + + +%******************************************************************************% +% 매크로와 전처리기 % +%******************************************************************************% + \section{매크로와 전처리기} + + \begin{itemize} + + \item 매크로 상수(또는 \#define)는 리터럴이나 + 상숫값에만 사용 가능합니다. + \item Norm을 우회하거나 코드 가독성을 낮추는 모든 \#define은 금지됩니다. + 이 부분은 사람에 의해 검사되어야 합니다. + \item 표준 라이브러리의 매크로는 프로젝트에서 + 사용이 허가되었을 경우에만 사용 가능합니다. + \item 여러 줄에 걸친 매크로는 금지됩니다. + \item 매크로 이름은 모두 대문자여야만 합니다. + \item \#if, \#ifdef, \#ifndef 다음 문자들은 들여쓰기 해야만 합니다. + + \end{itemize} + \newpage + + +%******************************************************************************% +% 금지 사항! % +%******************************************************************************% + \section{금지 사항!} + + \begin{itemize} + + \item 다음 구문은 사용이 금지됩니다: + + \begin{itemize} + + \item for + \item do...while + \item switch + \item case + \item goto + + \end{itemize} + + \item 다음과 같은 삼항 연산자 `?'. + + \item VLA - 가변 길이 배열. + + \item 자료형을 명시하지 않은 변수 선언 + + \end{itemize} + \begin{42ccode} + int main(int argc, char **argv) + { + int i; + char string[argc]; // 가변 길이 배열 + + i = argc > 5 ? 0 : 1 // 삼항 연산자 + } + \end{42ccode} + \newpage + +%******************************************************************************% +% 주석 % +%******************************************************************************% + \section{주석} + + \begin{itemize} + + \item 주석은 함수 내부에 있을 수 없습니다. + 주석은 줄 끝에 있거나 별개의 줄에 있어야만 합니다. + + \item 주석은 영어여야만 합니다. 그리고 유용해야만 합니다. + + \item 주석은 "쓰레기 같은" 함수를 정당화할 수 없습니다. + + \end{itemize} + \newpage + + +%******************************************************************************% +% 파일 % +%******************************************************************************% + \section{파일} + + \begin{itemize} + + \item .c 파일을 인클루드할 수 없습니다. + + \item 하나의 .c 파일에 함수를 5개보다 많이 정의할 수 없습니다. + + \end{itemize} + \newpage + + +%******************************************************************************% +% Makefile % +%******************************************************************************% + \section{Makefile} + + Makefile은 Norm에서 확인하지 않으며, 반드시 + 학생이 평가 중에 확인해야만 합니다. 
+ \begin{itemize} + + \item 다음 규칙은 필수적입니다. + \$(NAME), clean, fclean, re and all + + \item Makefile이 리링크(relink)되면, 프로젝트는 + 작동하지 않는 것으로 간주됩니다. + + \item 실행 파일이 여러 개인 프로젝트의 경우, 위의 규칙 이외에도 + 컴파일된 각각의 실행 파일에 대한 특정 규칙 뿐만 아니라 + 실행 파일들을 모두 컴파일하는 규칙이 있어야만 합니다. + + \item 비-시스템 라이브러리(예: \texttt{libft})에서 + 함수를 호출하는 프로젝트의 경우 Makefile은 + 반드시 이 라이브러리를 자동으로 컴파일해야만 합니다. + + \item 프로젝트를 컴파일하기 위해 필요한 모든 소스파일들은 + Makefile에 반드시 명시해야만 합니다. + + \end{itemize} + + +\end{document} +%******************************************************************************% diff --git a/pdf/pt_br.norm.pdf b/pdf/pt_br.norm.pdf new file mode 100644 index 00000000..a5e2ea3d Binary files /dev/null and b/pdf/pt_br.norm.pdf differ diff --git a/pdf/pt_br.norm.pdf.version b/pdf/pt_br.norm.pdf.version new file mode 100644 index 00000000..8a36cd14 --- /dev/null +++ b/pdf/pt_br.norm.pdf.version @@ -0,0 +1 @@ +4.1 \ No newline at end of file diff --git a/pdf/pt_br.norm.tex b/pdf/pt_br.norm.tex new file mode 100644 index 00000000..dda4b775 --- /dev/null +++ b/pdf/pt_br.norm.tex @@ -0,0 +1,541 @@ +\documentclass{42-pt} +\newcommand\qdsh{\texttt{42sh}} + + + +%******************************************************************************% +% % +% Prologue % +% % +%******************************************************************************% + +\begin{document} +\title{A Norma} +\subtitle{Versão 4.1} + +\summary +{ + Este documento descreve o padrão aplicável (Norma) na 42. Um padrão de + programação define um conjunto de regras a seguir ao escrever um código. + A Norma aplica-se a todos os projetos C dentro do Common Core por padrão, e + para qualquer projeto onde é especificado. +} + +\maketitle + +\tableofcontents + + + +%******************************************************************************% +% % +% Introdução % +% % +%******************************************************************************% +\chapter{Introdução} + +A \texttt{norminette} é um código Python e open source que verifica a conformidade +do seu código-fonte com a Norma. Ela verifica muitas restrições da Norma, mas +não todas (por exemplo, restrições subjetivas). A menos que haja regulamentos +locais específicos em seu campus, a \texttt{norminette} prevalece durante +avaliações nos itens controlados. Nas páginas a seguir, as regras que não são +verificadas pela \texttt{norminette} são marcadas com \textit{(*)}, e podem levar +à reprovação do projeto (usando a flag da Norma) se descobertas pelo +avaliador durante uma revisão de código.\\ + +Seu repositório está disponível em https://github.com/42School/norminette.\\ + +Pull requests, sugestões e issues são bem-vindos! + +\newpage + + +%******************************************************************************% +% % +% Explicações Pedago % +% % +%******************************************************************************% + \chapter{Por quê ?} + + A Norma foi cuidadosamente elaborada para suprir diversas necessidades + pedagógicas. Aqui estão alguns dos motivos mais importantes por trás das + escolhas abaixo: + \begin{itemize} + + \item Sequenciamento: programar implica dividir uma tarefa grande e + complexa em uma série de instruções elementares. Todas essas instruções + vão ser executadas em sequência: uma após a outra. Um iniciante, ao + começar a criar software, precisa de uma arquitetura simples e + clara para seu projeto, tendo o entendimento completo de todas as + instruções individuais e da exata ordem de execução. 
Sintaxes de + linguagens crípticas que aparentam executar múltiplas + instruções ao mesmo tempo são confusas, funções que buscam abordar + múltiplas tarefas misturadas na mesma porção de código são fontes + de erros. \\ + A Norma pede que você escreva trechos simples de código cujas tarefas + possam ser entendidas e verificadas facilmente, em que a sequência de + execução das instruções não deixa dúvidas. Por este motivo que há o + limite máximo de 25 linhas por função, e o porquê de \texttt{for}, + \texttt{do .. while}, ou ternários serem proibidos. + + + \item Estética: enquanto se relaciona com seus colegas durante o processo + natural de aprendizado entre pares, e também durante as avaliações + entre pares, você não quer gastar tempo decifrando o código deles, + mas falar diretamente sobre a lógica daquele trecho de código.\\ A + Norma pede por uma estética específica, provendo instruções para + nomear funções e variáveis, indentação, utilização das chaves, + tabulações e espaços em diversos lugares... . Isso vai permitir que + você olhe brevemente para o código de outros e o ache familiar, podendo + ir direto ao assunto, ao invés de gastar tempo lendo o código antes + de entendê-lo. A Norma também se caracteriza como uma marca + registrada. Como parte da comunidade 42, você vai poder reconhecer + código escrito por outro cadete ou alumni da 42 quando estiver no + mercado de trabalho. + + + \item Visão de longo prazo: esforçar-se para escrever um código compreensível + é a melhor maneira de administrá-lo. Toda vez que alguém, incluindo + você, precisar consertar um bug ou adicionar uma nova + funcionalidade, não será necessário gastar tempo tentando entender + o funcionamento se você escreveu seu código da maneira correta. Isso + vai evitar situações em que trechos de código deixam de ser + atualizados apenas por tomarem tempo, o que vai fazer a diferença + ao falarmos sobre ter um produto bem sucedido no mercado. Quanto + mais cedo aprender, melhor. + + + \item Referências: você pode pensar que algumas, ou todas, as regras + inclusas na Norma são arbitrárias, mas nós pensamos cuidadosamente e + pesquisamos como elaborá-la. Nós encorajamos fortemente que você + pesquise o porquê de funções precisarem ser curtas e possuir apenas + uma tarefa, o porquê de nomes de variáveis precisarem ser + compreensíveis, o porquê de linhas não poderem extrapolar o limite + de 80 colunas de largura, o porquê de uma função não poder receber + vários parâmetros, o porquê de comentários serem úteis, etc. + + + \end{itemize} + + +\newpage + +%******************************************************************************% +% % +% A Norma % +% % +%******************************************************************************% +\chapter{A Norma} + + +%******************************************************************************% +% Convenções de nomeação % +%******************************************************************************% + \section{Denominação} + + \begin{itemize} + + \item O nome de um struct deve começar por + \texttt{s\_}. + + \item O nome de um typedef deve começar por + \texttt{t\_}. + + \item O nome de um union deve começar por \texttt{u\_}. + + \item O nome de um enum deve começar por \texttt{e\_}. + + \item O nome de uma variável global deve começar por \texttt{g\_}. + + \item Identificadores, como nomes de variáveis, funções e tipos definidos pelo usuário, + só podem conter letras minúsculas, dígitos e '\_' (snake\_case). Nenhuma letra maiúscula é permitida. 
+ + \item Nomes de arquivos e diretórios só podem conter letras minúsculas, dígitos e + '\_' (snake\_case). + + \item Caracteres que não fazem parte da tabela ASCII padrão são proibidos, exceto dentro de strings literais e caracteres. + + \item \textit{(*)} Todos os nomes de identificadores (funções, tipos, + variáveis, etc.) devem ser explícitos, ou mnemônicos, + devem ser legíveis em inglês, com cada palavra separada por um underscore. + Isso se aplica a macros, nomes de arquivos e diretórios também. + + \item O uso de variáveis globais que não são marcadas como const ou static é + proibido e é considerado um erro de norma, a menos que o projeto as permita explicitamente. + + \item O arquivo deve compilar. Não se espera que um arquivo que não compila + passe na Norma. + \end{itemize} +\newpage + +%******************************************************************************% +% Formatação % +%******************************************************************************% + \section{Formatação} + + \begin{itemize} + + \item Cada função deve ter no máximo 25 linhas, sem contar as próprias chaves da função. + + \item Cada linha deve ter no máximo 80 colunas de largura, incluindo comentários. + Atenção: uma tabulação não conta como uma única coluna, mas como o número de espaços que ela representa. + + \item Funções devem ser separadas por uma linha vazia. Comentários ou instruções de pré-processador + podem ser inseridos entre funções. Pelo menos uma linha vazia deve existir. + + \item Você deve indentar seu código com tabulações de 4 caracteres. + Isso não é o mesmo que 4 espaços, estamos falando de tabulações reais (caractere ASCII número 9). + Verifique se seu editor de código está corretamente configurado para obter uma indentação visual adequada + que será validada pela \texttt{norminette}. + + \item Blocos dentro de chaves devem ser indentados. As chaves ficam sozinhas em sua própria linha, + exceto na declaração de struct, enum, union. + + \item Uma linha vazia deve estar realmente vazia: sem espaços ou tabulações. + + \item Uma linha nunca pode terminar com espaços ou tabulações. + + \item Nunca pode haver duas linhas vazias consecutivas. + Nunca pode haver dois espaços consecutivos. + + \item Declarações devem estar no início de uma função. + + \item Todos os nomes de variáveis devem ser indentados na mesma + coluna em seu escopo. Nota: os tipos já são indentados pelo bloco que os contém. + + \item Os asteriscos que acompanham ponteiros devem estar colados + ao nome da variável. + + \item Apenas uma declaração de variável por linha. + + \item Declaração e inicialização não podem estar + na mesma linha, exceto para variáveis globais (quando permitido), + variáveis estáticas e constantes. + + \item Em uma função, você deve colocar uma linha vazia entre + as declarações de variáveis e o restante da função. + Nenhuma outra linha vazia é permitida dentro de uma função. + + \item Apenas uma instrução ou estrutura de controle por linha é permitida. Ex.: Atribuição em + uma estrutura de controle é proibida, duas ou mais atribuições na mesma linha são proibidas, + uma nova linha é necessária ao final de uma estrutura de controle, ... . + + \item Uma instrução ou estrutura de controle pode ser dividida em várias linhas quando necessário. + As linhas seguintes devem ser indentadas em relação à primeira linha, + espaços naturais devem ser usados para cortar a linha, e, se aplicável, operadores devem estar + no início da nova linha e não no final da anterior. 
+ + \item A menos que seja o final de uma linha, cada vírgula ou ponto e vírgula + deve ser seguido por um espaço. + + \item Cada operador ou operando deve ser separado por um + - e apenas um - espaço. + + \item Cada palavra-chave do C deve ser seguida por um espaço, exceto por + palavras-chave de tipos (como int, char, float, etc.), + assim como sizeof. + + \item Estruturas de controle (if, while..) devem usar chaves, a menos que contenham uma única + instrução em uma única linha. + + \end{itemize} + +\vspace{1cm} + + Exemplo geral: + \begin{42ccode} +int g_global; +typedef struct s_struct +{ + char *my_string; + int i; +} t_struct; +struct s_other_struct; + +int main(void) +{ + int i; + char c; + + return (i); +} + \end{42ccode} + \newpage + +%******************************************************************************% +% Parâmetros de função % +%******************************************************************************% + \section{Funções} + + \begin{itemize} + + \item Uma função pode ter até 4 parâmetros definidos no máximo. + + \item Uma função que não tem argumentos deve ser + explicitamente prototipada com a palavra "void" como o + argumento. + + \item Parâmetros em protótipos de funções devem ser nomeados. + + \item Cada função deve ser separada da próxima por + uma linha vazia. + + \item Você não pode declarar mais de 5 variáveis por função. + + \item O retorno de uma função deve estar entre parênteses, a menos + que a função retorne nada. + + \item Cada função deve ter uma tabulação única entre seu + tipo de retorno e seu nome. + + \end{itemize} + +\vspace{1cm} + + \begin{42ccode} +int my_func(int arg1, char arg2, char *arg3) +{ + return (my_val); +} + +int func2(void) +{ + return ; +} + \end{42ccode} + + \newpage + + +%******************************************************************************% +% Typedef, struct, enum e union % +%******************************************************************************% + \section{Typedef, struct, enum e union} + + \begin{itemize} + + \item Como outras palavras-chave do C, adicione um espaço entre ``struct'' e o nome + ao declarar uma struct. O mesmo se aplica para enum e union. + + \item Ao declarar uma variável do tipo struct, aplique a indentação usual para o nome + da variável. O mesmo se aplica para enum e union. + + \item Dentro das chaves da struct, enum, union, as regras regulares de indentação + se aplicam, como em qualquer outro bloco. + + \item Como outras palavras-chave do C, adicione um espaço após ``typedef'', + e aplique a indentação regular para o novo nome definido. + + \item Você deve indentar todos os nomes das estruturas na mesma coluna para seu escopo. + + \item Você não pode declarar uma estrutura em um arquivo .c. + + \end{itemize} + \newpage + + +%******************************************************************************% +% Headers % +%******************************************************************************% + \section{Headers - ou arquivos de inclusão} + + \begin{itemize} + + \item \textit{(*)} Os elementos permitidos em um arquivo header são: + inclusões de headers (sistema ou não), declarações, defines, + protótipos e macros. + + \item Todas as inclusões devem estar no início do arquivo. + + \item Você não pode incluir um arquivo C em um header ou em outro arquivo C. + + \item Arquivos header devem ser protegidos contra inclusões duplas. Se o arquivo for + \texttt{ft\_foo.h}, sua macro de proteção deve ser \texttt{FT\_FOO\_H}. 
+ + \item \textit{(*)} A inclusão de headers não utilizados é proibida. + + \item A inclusão de headers pode ser justificada no arquivo .c e no próprio arquivo .h + usando comentários. + + \end{itemize} + +\vspace{1cm} + + \begin{42ccode} +#ifndef FT_HEADER_H +# define FT_HEADER_H +# include +# include +# define FOO "bar" + +int g_variable; +struct s_struct; + +#endif + \end{42ccode} + \newpage + +%******************************************************************************% +% O Header da 42 % +%******************************************************************************% + + \section{O header da 42 - ou comece um arquivo com estilo} + + \begin{itemize} + + \item Todo arquivo .c e .h deve começar imediatamente com o header padrão da 42: + um comentário multilinha com um formato especial incluindo informações úteis. O + header padrão está disponível nos computadores dos clusters para vários + editores de texto (emacs: usando \texttt{C-c C-h}, vim usando \texttt{:Stdheader} ou + \texttt{F1}, etc...). + + \item \textit{(*)} O header da 42 deve conter várias informações atualizadas, incluindo o + criador com login e e-mail estudantil (@student.campus), a data de criação, + o login e a data da última atualização. Cada vez que o arquivo for salvo no disco, + as informações devem ser atualizadas automaticamente. + + \end{itemize} + \info{ + O header padrão pode não estar automaticamente configurado com suas informações pessoais. + Você pode precisar alterá-lo para seguir a regra anterior. + } + + \newpage + +%******************************************************************************% +% Macros e pré-processadores % +%******************************************************************************% + \section{Macros e Pré-processadores} + + \begin{itemize} + + \item \textit{(*)} Constantes de pré-processador (ou \#define) que você criar devem ser usadas + apenas para valores literais e constantes. + \item \textit{(*)} Todo \#define criado para burlar a norma e/ou ofuscar + o código é proibido. + \item \textit{(*)} Você pode usar macros disponíveis em bibliotecas padrão, somente + se estas forem permitidas no escopo do projeto em questão. + \item Macros multilinha são proibidas. + \item Nomes de macros devem estar todos em maiúsculas. + \item Você deve indentar diretivas de pré-processador dentro de blocos \#if, \#ifdef + ou \#ifndef. + \item Instruções de pré-processador são proibidas fora do escopo global. + + \end{itemize} + \newpage + + +%******************************************************************************% +% Coisas proibidas! % +%******************************************************************************% + \section{Coisas proibidas!} + + \begin{itemize} + + \item Você não tem permissão para usar: + + \begin{itemize} + + \item for + \item do...while + \item switch + \item case + \item goto + + \end{itemize} + + \item Operadores ternários como `?'. + + \item VLAs - Arrays de comprimento variável. + + \item Tipo implícito em declarações variáveis. + + \end{itemize} + +\vspace{1cm} + + \begin{42ccode} + int main(int argc, char **argv) + { + int i; + char string[argc]; // This is a VLA + + i = argc > 5 ? 0 : 1 // Ternary + } + \end{42ccode} + \newpage + +%******************************************************************************% +% Comments % +%******************************************************************************% + \section{Comentários} + + \begin{itemize} + + \item Comentários não podem estar dentro do corpo das funções. 
+ Comentários devem estar no final de uma linha ou em sua própria linha. + + \item \textit{(*)} Seus comentários devem estar em inglês e ser úteis. + + \item \textit{(*)} Um comentário não pode justificar a criação de uma função genérica/faz-tudo ou ruim. + + \end{itemize} + + \warn{ + Uma função genérica/faz-tudo ou ruim geralmente possui nomes que não são + explícitos, como f1, f2... para funções e a, b, c,... para nomes de variáveis. + Uma função cujo único objetivo é evitar a norma, sem um propósito + lógico único, também é considerada uma função ruim. + Lembre-se de que é desejável ter funções claras e legíveis que realizem + uma tarefa clara e simples. Evite qualquer técnica de ofuscação de código, + como one-liner, ... . + } + \newpage + + +%******************************************************************************% +% Files % +%******************************************************************************% + \section{Arquivos} + + \begin{itemize} + + \item Você não pode incluir um arquivo .c em um arquivo .c. + + \item Você não pode ter mais de 5 definições de função em um arquivo .c. + + \end{itemize} + \newpage + + +%******************************************************************************% +% Makefile % +%******************************************************************************% + \section{Makefile} + + Makefiles não são verificados pela \texttt{norminette} e devem ser checados durante a avaliação pelo + estudante quando solicitado pelas diretrizes de avaliação. A menos que haja instruções específicas, as seguintes regras + se aplicam aos Makefiles: + \begin{itemize} + + \item As regras \textit{\$(NAME)}, \textit{clean}, \textit{fclean}, \textit{re} e \textit{all} + são obrigatórias. A regra \textit{all} deve ser a padrão e executada ao digitar apenas \texttt{make}. + + \item Se o makefile fizer relink quando não for necessário, o projeto será considerado + não funcional. + + \item No caso de um projeto com múltiplos binários, além das regras acima, você deve ter uma regra para cada binário (ex: \$(NAME\_1), \$(NAME\_2), ...). + A regra ``all'' irá compilar todos os binários, utilizando a regra de cada binário. + + \item No caso de um projeto que utiliza uma função de uma biblioteca não do sistema + (ex.: \texttt{libft}) que existe junto ao seu código fonte, seu makefile deve compilar + essa biblioteca automaticamente. + + \item Todos os arquivos fonte necessários para compilar seu projeto devem + ser explicitamente nomeados no seu Makefile. Ex.: nada de ``*.c'', nada de ``*.o'', etc ... 
+ + \end{itemize} + + +\end{document} +%******************************************************************************% diff --git a/pdf/tr.norm.pdf b/pdf/tr.norm.pdf new file mode 100644 index 00000000..aaea1a87 Binary files /dev/null and b/pdf/tr.norm.pdf differ diff --git a/pdf/tr.norm.pdf.version b/pdf/tr.norm.pdf.version new file mode 100644 index 00000000..56a6051c --- /dev/null +++ b/pdf/tr.norm.pdf.version @@ -0,0 +1 @@ +1 \ No newline at end of file diff --git a/pdf/tr.norm.tex b/pdf/tr.norm.tex new file mode 100644 index 00000000..68d52f36 --- /dev/null +++ b/pdf/tr.norm.tex @@ -0,0 +1,438 @@ +% vim: set ts=4 sw=4 tw=80 noexpandtab: +\documentclass{42-en} +\newcommand\qdsh{\texttt{42sh}} + +%******************************************************************************% +% % +% Prologue % +% % +%******************************************************************************% + +\begin{document} +\title{Norm} +\subtitle{Sürüm3} + +\summary +{ + Özet: Bu belge 42’de uygulanabilecek standardı (Norm) tanımlamaktadır. + Bir programlama standardı, kod yazarken uyulması gereken kurallar bütününü + tanımlar. Norm, aksi kararlaştırılmadıkça Inner Circle kapsamındaki + tüm C projelerine, özel olarak belirtildiği takdirde de diğer her türlü + projeye uygulanır. +} + +\maketitle + +\tableofcontents + + + +%******************************************************************************% +% % +% Foreword % +% % +%******************************************************************************% +\chapter{Önsöz} + + Norm, Python’da yazılmıştır ve açık kaynaklı bir projedir(?). + Veri havuzuna https://github.com/42School/norminette üzerinden erişilebilir. + Pull request, öneri ve sorunlara ilişkin iletişime geçmekten çekinmeyin! + +%******************************************************************************% +% % +% The Norm % +% % +%******************************************************************************% +\chapter{Norm} + + +%******************************************************************************% +% Naming conventions % +%******************************************************************************% + \section{İsimlendirme} + + \begin{itemize} + + \item Bir struct’ın ismi mutlaka \texttt{s\_} ile başlamalıdır. + + \item Bir typedef’in ismi mutlaka \texttt{t\_} ile başlamalıdır + + \item Bir union’ın ismi mutlaka \texttt{u\_} ile başlamalıdır. + + \item Bir enum’ın ismi mutlaka \texttt{e\_} ile başlamalıdır. + + \item Bir global’ın ismi mutlaka \texttt{g\_} ile başlamalıdır. + + \item Değişkenler ile fonksiyonların isimleri yalnızca küçük harfler, rakamlar ve '\_' (Unix Case) içerebilir. + + \item Dosyalar ile dizinlerin isimleri yalnızca küçük harfler, rakamlar ve '\_' (Unix Case) içerebilir. + + \item Standart ASCII tablosunda yer almayan karakterlerin kullanımı yasaktır. + + \item Değişkenler, fonksiyonlar ve diğer tüm belirleyicilerde snake case kullanılmalıdır. Büyük harf kullanılmamalı ve her bir kelime alt çizgi ile ayrılmalıdır. + + \item Tüm belirleyiciler (fonksiyonlar, makrolar, tipler, değişkenler vs.) İngilizce olmalıdır. + + \item Nesneler (değişkenler, fonksiyonlar, makrolar, tipler, dosyalar veya dizinler) mümkün olan en açık ve +akılda kalıcı şekilde isimlendirilmelidir. + + \item Const ve static olarak işaretlenmemiş global değişlenlerin kullanılması yasaktır ve bu durum, projenin +bunlara açıkça izin vermediği hallerde norm hatası olarak değerlendirilir + + \item Bir dosya mutlaka derlenebilmelidir. Derlenemeyen bir dosyanın Norm’a uymasısöz konusu olmayacaktır. 
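+
+	As a purely illustrative aid (not part of the Norm itself), the sketch below
+	shows the naming prefixes listed above; every identifier is a placeholder:
+
+	\begin{42ccode}
+/* Illustrative names only: s_/t_/u_/e_/g_ prefixes and snake_case. */
+struct s_point
+{
+	int	x;
+	int	y;
+};
+
+typedef struct s_point	t_point;
+
+union u_value
+{
+	int		number;
+	char	letter;
+};
+
+enum e_state
+{
+	STATE_OK,
+	STATE_ERROR
+};
+
+int	g_error_count;
+
+int	compute_distance(t_point start, t_point end);
+	\end{42ccode}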
+ + \end{itemize} +\newpage + +%******************************************************************************% +% Formatting % +%******************************************************************************% + \section{Format} + + \begin{itemize} + + \item Kodunuzu mutlaka 4 boşluk ile indentlemelisiniz. Burada bahsedilen boşluk 4 ortalama boşluk anlamına +gelmemekte olup, gerçek anlamda tab tuşuna basılmasını ifade etmektedir. + + \item Her fonksiyon, fonksiyonun kendi kıvrımlı ayraçları (curly bracket) hariç, maksimum 25 satırdan oluşmalıdır. + + \item Her satır, yorumlarla birlikte, maksimum 80 sütun genişliğinde olmalıdır. Uyarı: bir kez tablanmış olma bir +sütun olarak sayılmamakta, karşılık geldiği boşluk sayısı kadar dikkate alınmaktadır. + + \item Her fonksiyon yeni bir satır başı ile ayrılmalıdır. Herhangi bir yorum veya ön işlemci, fonksiyonun hemen +üzerinde yer alabilir. Satır başı bir önceki fonksiyondan sonra gelir. + + \item Her satırda tek bir talimat yer almalıdır. + + \item Boş bir satır mutlaka boş olmalıdır: herhangi bir boşluk veya tab olmamalıdır. + + \item Bir satır asla boşluk veya tab ile bitemez. + + \item Hiçbir zaman peş peşe iki boşluk bırakamazsınız. + + \item Her bir kıvrımlı ayraçtan (curly bracket) sonra veya kontrol yapısının sonunda yeni bir satıra geçmelisiniz. + + \item Bir satırın sonu olmadığı takdirde, her virgül ve noktalı virgülden sonra bir boşluk bırakılmalıdır. + + \item Her bir operatör veya operand yalnızca ve yalnızca tek bir boşluk ile ayrılmalıdır. + + \item Tipler (int, char, float vs. gibi) için olanlar hariç her bir C sözcüğünden (keyword) ve sizeof’tan sonra bir +boşluk bırakılmalıdır. + + \item Her bir değişken declarationı kendi kapsamına göre ilgili sütunda indentlenmiş olmalıdır + + \item Pointer’larla birlikte kullanılan asteriskler değişken isimlerine bağlı olmalıdır + + \item Her satırda tek bir değişken declarationı yer almalıdır. + + \item Global değişkenler (izin verilmesi halinde), statik değişkenler ve sabitler dışında, declarationlar ve initializationlar aynı satırda yer alamaz. + + \item Declarationlar fonksiyonların başında yer almalıdır. + + \item Bir fonksiyonda, değişken declarationları ile fonksiyonun geri kalanı arasında boş bir satır bırakmalısınız. +Fonksiyonda başka herhangi bir boş satıra izin verilmez. + + \item Çoklu atamalar kesinlikle yasaktır. + + \item Bir talimat ya da structuredan sonra yeni bir satır ekleyebilirsiniz, ancak bu durumda ayraçlar (brackets) +veya atama operatörü ile bir girinti (indentation) eklemeniz gerekecektir. Operatörler satırın başında olmalıdır. + + \item Kontrol yapılarında (if, while..), tek bir çizgi ya da tek bir koşul içerdikleri haller dışında, ayraç (brace) +bulunmalıdır. + + \end{itemize} + + \newpage + + General example: + \begin{42ccode} +int g_global; +typedef struct s_struct +{ + char *my_string; + int i; +} t_struct; +struct s_other_struct; + +int main(void) +{ + int i; + char c; + + return (i); +} + \end{42ccode} + \newpage + +%******************************************************************************% +% Function parameters % +%******************************************************************************% + \section{Fonksiyonlar} + + \begin{itemize} + + \item Bir fonksiyon maksimum 4 isimlendirilmiş parametre alabilir. + + \item Herhangi bir parametre almamış fonksiyonlar mutlaka parametre kısmına açıkça ‘void ’yazılarak prototiplendirilmelidir. + + \item Fonksiyonların prototiplerindeki parametreler mutlaka isimlendirilmelidir. 
+ + \item Her fonksiyon bir sonrakinden mutlaka boş bir satır ile ayrılmalıdır. + + \item Fonksiyon başına 5 değişkenden fazlasını declare edemezsiniz. + + \item Bir fonksiyonun geri dönüşü (return) parantez içinde olmalıdır. + + \item Her fonksiyonun geri dönüş (return) tipi ve ismi arasında tek bir tab bulunmalıdır. + + \begin{42ccode} +int my_func(int arg1, char arg2, char *arg3) +{ + return (my_val); +} + +int func2(void) +{ + return ; +} + \end{42ccode} + + \end{itemize} + \newpage + + +%******************************************************************************% +% Typedef, struct, enum and union % +%******************************************************************************% + \section{Typedef, struct, enum ve union} + + \begin{itemize} + + \item Bir struct, enum veya union declare edilirken bir tab ekleyin. + + \item Bir tip struct, enum veya unionının değişkenini declare ederken tipe tek bir boşluk ekleyin. + + \item Typedef ile bir struct, union veya enum declare ederken, tüm indentleme kuralları uygulanır. Typedef’in adı +ile struct/union/enum’ın adını hizalamalısınız. + + \item Tüm structureların isimlerini kendi kapsamına göre ilgili sütunda indentlemelisiniz. + + \item Bir structureı bir .c dosyasının içinde declare edemezsiniz. + + \end{itemize} + \newpage + + +%******************************************************************************% +% Headers % +%******************************************************************************% + \section{Header Dosyaları} + + \begin{itemize} + + \item Başlık dosyalarında izin verilenler: başlık inclusionları (sistem veya değil), declarationlar, prototipler ve +makrolar. + + \item Tüm includelar dosyanın başlangıcında olmalıdır + + \item Bir C dosyası include edemezsiniz. + + \item Başlık dosyaları çift inclusiondan korunmalıdır. Eğer dosya \texttt{ft\_foo.h} ise, onun makro karşılığı \texttt{FT\_FOO\_H} +şeklindedir. + + \item Kullanılmayan başlık inclusionları (.h) yasaktır + + \item Tüm başlık inclusionları hem .c dosyasında hem de .h dosyasında doğrulanmalıdır. + + \end{itemize} + + \begin{42ccode} +#ifndef FT_HEADER_H +# define FT_HEADER_H +# include +# include +# define FOO "bar" + +int g_variable; +struct s_struct; + +#endif + \end{42ccode} + \newpage + +%******************************************************************************% +% Macros and Pre-processors % +%******************************************************************************% + \section{Makrolar ve Ön İşlemciler} + + \begin{itemize} + + \item Yarattığınız ön işlemci sabitleri (veya \#define) yalnızca gerçek ve sabit değerler için kullanılmalıdır. + + \item Normu bypass etmek ve/veya kod karıştırmak için yaratılan tüm \#define yasaktır. Bu kısım bir insan tarafından kontrol edilmelidir. + + \item Standart kütüphanelerdeki makroları, yalnızca verilen projelerin kapsamında bunlara izin verilmesi halinde +kullanabilirsiniz. + + \item Multiline makrolar yasaktır. + + \item Makro isimlerinin tamamı büyük harf olmalıdır. + + \item \#if, \#ifdef veya \#ifndefden sonra gelen karakterleri indentlemelisiniz. + + \end{itemize} + \newpage + + +%******************************************************************************% +% Forbidden stuff! % +%******************************************************************************% + \section{Yasaklar!} + + \begin{itemize} + + \item Aşağıdakileri kullanma izniniz bulunmamaktadır: + + \begin{itemize} + + \item for + \item do...while + \item switch + \item case + \item goto + + \end{itemize} + + \item ‘? 
’gibi ternary operatörleri. + + \item VLAlar - Variable Length Arrays (Değişken Uzunluklu Dizi) + + \item Değişlen declarationlarında implicit tip. + + \end{itemize} + \begin{42ccode} + int main(int argc, char **argv) + { + int i; + char string[argc]; // This is a VLA + + i = argc > 5 ? 0 : 1 // Ternary + } + \end{42ccode} + \newpage + +%******************************************************************************% +% Comments % +%******************************************************************************% + \section{Yorumlar} + + \begin{itemize} + + \item Yorumlar fonksiyon gövdelerinin içinde olamaz. Yorumlar bir satırın veya kendi satırlarının sonunda olmalıdır. + + \item Yorumlarınız İngilizce ve kullanışlı olmalıdır. + + \item Bir yorum ‘nahoş’ bir fonksiyonu doğrulayamaz. + + \end{itemize} + \newpage + + +%******************************************************************************% +% Files % +%******************************************************************************% + \section{Dosyalar} + + \begin{itemize} + + \item Bir .c dosyasını include edemezsiniz. + + \item Bir .c dosyasında 5 taneden fazla fonksiyon tanımına yer veremezsiniz. + + \end{itemize} + \newpage + + +%******************************************************************************% +% Makefile % +%******************************************************************************% + \section{Makefile} + + Makefilelar Norm tarafından kontrol edilmez, öğrenci tarafından gelişim süresince kontrol edilmelidir. + + \begin{itemize} + + \item \$(NAME), clean, fclean, re ve tüm kurallar bağlayıcıdır. + + \item Makefile relink ederse, proje işlevsiz kabul edilecektir + + \item Çoklu ikili bir proje söz konusu ise, yukarıdaki kurallara ek olarak, her iki ikiliyi de compile eden bir kural +ile compile edilen her bir ikiliye özgü ayrı bir kurala sahip olmalısınız. + + \item Sistem dışı bir kütüphaneden (libft vs.) fonksiyon çağıran bir proje söz konusu ise, makefileınız bu kütüphane +ile otomatik olarak compile etmelidir. + + \item Projenizi compile etmeniz gereken tüm kaynak dosyalar Makefile’ınızda açıkça isimlendirilmiş olmalıdır. + + \end{itemize} + + \newpage +%******************************************************************************% +% Pedago explanations % +%******************************************************************************% + \section{Neden?} + + Norm, pek çok pedagojik ihtiyacı karşılamak amacıyla titizlikle hazırlanmıştır. Yukarıdaki tüm seçenekler için +en önemli nedenler şu şekildedir: + + \begin{itemize} + + \item Sıralama: Kodlama büyük ve karmaşık bir görevin uzun bir dizi basit + talimatlara bölünmesini sağlar. Tüm bu talimatlar, biri diğerini izleyecek + şekilde, sırayla yerine getirilir. Yazılım yaratmaya yeni başlayan bir kişi, + projesi için, tüm bireysel talimatlara ve gerçekleştirileceklerin doğru + sıralanmasına yönelik tam bir anlayış ile birlikte basit ve yalın bir mimariye + gereksinim duyar. Aynı anda birden fazla talimatı yerine getiren kriptik dildeki + dizilimler kafa karıştırıcıyken, tek bir kod paydasında karışık bir şekilde yer + alan birden fazla göreve işaret eden fonksiyonlar ise hatalara kaynak teşkil eder. + Norm sizden, her bir parçanın kendine özgü görevinin açıkça anlaşılabilir ve + doğrulanabilir olduğu ve tüm talimatların uygulanmasına ilişkin sıralamanın + şüpheye mahal vermeyecek nitelik arz ettiği basit kod paydaları oluşturmanızı + talep eder. 
Fonksiyonlarda maksimum 25 satıra yer verilmesini talep etmemizin ve for, + do…while veya ternary kullanımlarının yasak olmasının sebebi de budur. + + \item Görünüm ve Tavır: Arkadaşlarınızla bilgi alışverişi esnasında veya sizinle + aynı pozisyondaki iş arkadaşlarınızla birlikte öğrenme ve ayrıca birbirinizi + değerlendirme süreçlerinizde, onların kodlarını deşifre etmekle vakit kaybetmek + istemez, doğrudan kodlarının arkasında yatan mantığa dair sohbet etmek istersiniz. + Norm sizden, fonksiyon ve değişkenlerin isimlendirilmesi, indent edilmesi, ayraç + (brace) kuralları, pek çok yerde kullanılan tab ve boşluklara vs. ilişkin kendinize + özgü bir görünüm ve tavır kullanmanızı talep eder. Bu size, başkalarının sizinkilere + benzer görünümdeki kodlarını, kodları anlamadan önce okumaya vakit harcamaya gerek + kalmaksızın, kolaylıkla incelemeniz imkanını sunar. Norm aynı zamanda bir marka değeri + taşımaktadır. 42 topluluğunun bir parçası olarak, iş pazarına girdiğinizde, diğer 42 + öğrenci veya mezunları tarafından yazılmış kodları da tanıyor olma imkanına sahip + olacaksınız. + + \item Uzun vadeli vizyon: Anlaşılabilir bir kod yazmak için gerekli çabayı sarf etmek, + onun sürdürülebilirliğini sağlamanın en iyi yoludur. Siz dahil herhangi bir kişinin, + herhangi bir bugı onarmaya veya yeni bir özellik eklemeye ihtiyaç duyduğu her an, + bir önceki kişi işini doğru şekilde yapmış ise, kıymetli vaktinizi kaybetmenizin + önüne geçilmiş olacaktır. Bu da kodların zaman kaybı nedeniyle sürdürülebilirliğini + yitirmesinin engellenmesini sağlayacak ve pazarda başarılı bir ürüne sahip olmaktan + bahsederken fark yaratacaktır. Bunu yapmayı ne kadar erken öğrenirseniz, sizin için o + kadar iyi olur. + + \item Norm’da yer alan kuralların bir kısmının veya tamamının tartışmaya açık olduğunu + düşünebilirsiniz, ancak biz ne yapılması ve nasıl yapılması gerektiğine ilişkin çok + fazla düşündük ve araştırma yaptık. Sizi fonksiyonların neden kısa olması ve tek bir + şey yapmaya yönelik olması gerektiğine, değişkenlerin isimlerinin neden bir anlam + ifade etmesi gerektiğine, satırların neden 80 sütun genişliğinden daha uzun olmaması + gerektiğine, bir fonksiyonun neden birden fazla parametre almaması gerektiğine, + yorumların neden faydalı olması gerektiğine vs. vs. ilişkin bir Google araması + yapmaya teşvik etmek isteriz. + + \end{itemize} + +\end{document} +%******************************************************************************% diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 00000000..7c0837e7 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,314 @@ +# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand. 
+ +[[package]] +name = "argparse" +version = "1.4.0" +description = "Python command-line parsing library" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "argparse-1.4.0-py2.py3-none-any.whl", hash = "sha256:c31647edb69fd3d465a847ea3157d37bed1f95f19760b11a47aa91c04b666314"}, + {file = "argparse-1.4.0.tar.gz", hash = "sha256:62b089a55be1d8949cd2bc7e0df0bddb9e028faefc8c32038cc84862aefdd6e4"}, +] + +[[package]] +name = "cachetools" +version = "5.3.1" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "cachetools-5.3.1-py3-none-any.whl", hash = "sha256:95ef631eeaea14ba2e36f06437f36463aac3a096799e876ee55e5cdccb102590"}, + {file = "cachetools-5.3.1.tar.gz", hash = "sha256:dce83f2d9b4e1f732a8cd44af8e8fab2dbe46201467fc98b3ef8f269092bf62b"}, +] + +[[package]] +name = "chardet" +version = "5.1.0" +description = "Universal encoding detector for Python 3" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "chardet-5.1.0-py3-none-any.whl", hash = "sha256:362777fb014af596ad31334fde1e8c327dfdb076e1960d1694662d46a6917ab9"}, + {file = "chardet-5.1.0.tar.gz", hash = "sha256:0d62712b956bc154f85fb0a266e2a3c5913c2967e00348701b32411d6def31e5"}, +] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["dev"] +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "distlib" +version = "0.3.9" +description = "Distribution utilities" +optional = false +python-versions = "*" +groups = ["dev"] +files = [ + {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, + {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, +] + +[[package]] +name = "exceptiongroup" +version = "1.1.1" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "exceptiongroup-1.1.1-py3-none-any.whl", hash = "sha256:232c37c63e4f682982c8b6459f33a8981039e5fb8756b2074364e5055c498c9e"}, + {file = "exceptiongroup-1.1.1.tar.gz", hash = "sha256:d484c3090ba2889ae2928419117447a14daf3c1231d5e30d0aae34f354f01785"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "filelock" +version = "3.16.1" +description = "A platform independent file lock." 
+optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, + {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] +typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""] + +[[package]] +name = "flake8" +version = "7.2.0" +description = "the modular source code checker: pep8 pyflakes and co" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "flake8-7.2.0-py2.py3-none-any.whl", hash = "sha256:93b92ba5bdb60754a6da14fa3b93a9361fd00a59632ada61fd7b130436c40343"}, + {file = "flake8-7.2.0.tar.gz", hash = "sha256:fa558ae3f6f7dbf2b4f22663e5343b6b6023620461f8d4ff2019ef4b5ee70426"}, +] + +[package.dependencies] +mccabe = ">=0.7.0,<0.8.0" +pycodestyle = ">=2.13.0,<2.14.0" +pyflakes = ">=3.3.0,<3.4.0" + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +description = "McCabe checker, plugin for flake8" +optional = false +python-versions = ">=3.6" +groups = ["dev"] +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] + +[[package]] +name = "packaging" +version = "23.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, + {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, +] + +[[package]] +name = "platformdirs" +version = "4.3.6" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
+optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] + +[[package]] +name = "pluggy" +version = "1.0.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.6" +groups = ["dev"] +files = [ + {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, + {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "pycodestyle" +version = "2.13.0" +description = "Python style guide checker" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pycodestyle-2.13.0-py2.py3-none-any.whl", hash = "sha256:35863c5974a271c7a726ed228a14a4f6daf49df369d8c50cd9a6f58a5e143ba9"}, + {file = "pycodestyle-2.13.0.tar.gz", hash = "sha256:c8415bf09abe81d9c7f872502a6eee881fbe85d8763dd5b9924bb0a01d67efae"}, +] + +[[package]] +name = "pyflakes" +version = "3.3.2" +description = "passive checker of Python programs" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pyflakes-3.3.2-py2.py3-none-any.whl", hash = "sha256:5039c8339cbb1944045f4ee5466908906180f13cc99cc9949348d10f82a5c32a"}, + {file = "pyflakes-3.3.2.tar.gz", hash = "sha256:6dfd61d87b97fba5dcfaaf781171ac16be16453be6d816147989e7f6e6a9576b"}, +] + +[[package]] +name = "pyproject-api" +version = "1.5.1" +description = "API to interact with the python pyproject.toml based projects" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "pyproject_api-1.5.1-py3-none-any.whl", hash = "sha256:4698a3777c2e0f6b624f8a4599131e2a25376d90fe8d146d7ac74c67c6f97c43"}, + {file = "pyproject_api-1.5.1.tar.gz", hash = "sha256:435f46547a9ff22cf4208ee274fca3e2869aeb062a4834adfc99a4dd64af3cf9"}, +] + +[package.dependencies] +packaging = ">=23" +tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} + +[package.extras] +docs = ["furo (>=2022.12.7)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"] +testing = ["covdefaults (>=2.2.2)", "importlib-metadata (>=6) ; python_version < \"3.8\"", "pytest (>=7.2.1)", "pytest-cov (>=4)", "pytest-mock (>=3.10)", "virtualenv (>=20.17.1)", "wheel (>=0.38.4)"] + +[[package]] +name = "pytest" +version = "7.3.2" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "pytest-7.3.2-py3-none-any.whl", hash = "sha256:cdcbd012c9312258922f8cd3f1b62a6580fdced17db6014896053d47cddf9295"}, + {file = "pytest-7.3.2.tar.gz", hash = "sha256:ee990a3cc55ba808b80795a79944756f315c67c12b56abd3ac993a7b8c17030b"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = 
">=0.12,<2.0" +tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} + +[package.extras] +testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "tox" +version = "4.6.0" +description = "tox is a generic virtualenv management and test command line tool" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "tox-4.6.0-py3-none-any.whl", hash = "sha256:4874000453e637a87ca892f9744a2ab9a7d24064dad1b0ecbf5a4c3c146cc732"}, + {file = "tox-4.6.0.tar.gz", hash = "sha256:954f1f647f67f481d239a193288983242a6152b67503c4a56b19a4aafaa29736"}, +] + +[package.dependencies] +cachetools = ">=5.3" +chardet = ">=5.1" +colorama = ">=0.4.6" +filelock = ">=3.12" +packaging = ">=23.1" +platformdirs = ">=3.5.1" +pluggy = ">=1" +pyproject-api = ">=1.5.1" +tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} +virtualenv = ">=20.23" + +[package.extras] +docs = ["furo (>=2023.5.20)", "sphinx (>=7.0.1)", "sphinx-argparse-cli (>=1.11)", "sphinx-autodoc-typehints (>=1.23,!=1.23.4)", "sphinx-copybutton (>=0.5.2)", "sphinx-inline-tabs (>=2023.4.21)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=22.12)"] +testing = ["build[virtualenv] (>=0.10)", "covdefaults (>=2.3)", "devpi-process (>=0.3)", "diff-cover (>=7.5)", "distlib (>=0.3.6)", "flaky (>=3.7)", "hatch-vcs (>=0.3)", "hatchling (>=1.17)", "psutil (>=5.9.5)", "pytest (>=7.3.1)", "pytest-cov (>=4.1)", "pytest-mock (>=3.10)", "pytest-xdist (>=3.3.1)", "re-assert (>=1.1)", "time-machine (>=2.9) ; implementation_name != \"pypy\"", "wheel (>=0.40)"] + +[[package]] +name = "virtualenv" +version = "20.26.6" +description = "Virtual Python Environment builder" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "virtualenv-20.26.6-py3-none-any.whl", hash = "sha256:7345cc5b25405607a624d8418154577459c3e0277f5466dd79c49d5e492995f2"}, + {file = "virtualenv-20.26.6.tar.gz", hash = "sha256:280aede09a2a5c317e409a00102e7077c6432c5a38f0ef938e643805a7ad2c48"}, +] + +[package.dependencies] +distlib = ">=0.3.7,<1" +filelock = ">=3.12.2,<4" +platformdirs = ">=3.9.1,<5" + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8) ; platform_python_implementation == \"PyPy\" or platform_python_implementation == \"CPython\" and sys_platform == \"win32\" and python_version >= \"3.13\"", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10) ; platform_python_implementation == \"CPython\""] + +[metadata] +lock-version = "2.1" +python-versions = ">=3.10" +content-hash = "edcc9fd82415a834fad3a3e55bcf86d0a8bf1f5d9d01773b1e2da8a11cd3955f" diff --git a/poetry.toml 
b/poetry.toml new file mode 100644 index 00000000..62e2dff2 --- /dev/null +++ b/poetry.toml @@ -0,0 +1,3 @@ +[virtualenvs] +in-project = true +create = true diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..f4075288 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,52 @@ +[tool.poetry] +name = "norminette" +version = "3.3.59" +description = "Open source C files linter for 42 Network campuses" +authors = ["42 "] +license = "MIT Licence" +readme = "README.md" +repository = "https://github.com/42School/norminette" +keywords = ["42", "norminette"] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Natural Language :: English", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", +] +include = [ + "norminette/locale/**/LC_MESSAGES/*.mo" +] + +[tool.tox] +legacy_tox_ini = """ +[tox] +skipsdist = true +envlist = py310, py311, py312, py313 + +[testenv] +allowlist_externals = poetry +commands = + poetry install -v + poetry run pytest +""" + +[tool.poetry.dependencies] +python = ">=3.10" +argparse = "^1.4.0" + +[tool.poetry.group.dev.dependencies] +pytest = "^7.3.2" +tox = "^4.6.0" +flake8 = ">=7,<8" + +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" + +[tool.poetry.scripts] +norminette = "norminette.__main__:main" diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 1352d5e6..00000000 --- a/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -argparse diff --git a/setup.py b/setup.py deleted file mode 100644 index 78eb6ea4..00000000 --- a/setup.py +++ /dev/null @@ -1,22 +0,0 @@ -from setuptools import setup, find_namespace_packages - -import os -import setuptools -import subprocess -from norminette.version import __version__ -subprocess.call("pip install -r requirements.txt", shell=True) - -setup( - name="norminette", - version=__version__, - author="42", - author_email="pedago@42.fr", - description="Open source norminette", - package_dir={'lexer':'norminette/lexer', 'rules':'norminette/rules', 'tools':'norminette/tools'}, - packages=find_namespace_packages(), - entry_points={ - 'console_scripts': [ - 'norminette = norminette.__main__:main', - ], - }, -) diff --git a/tests/rules/rules_generator_test.py b/tests/rules/rules_generator_test.py new file mode 100644 index 00000000..9192c457 --- /dev/null +++ b/tests/rules/rules_generator_test.py @@ -0,0 +1,31 @@ +import pytest +import glob + +from norminette.file import File +from norminette.lexer import Lexer +from norminette.context import Context +from norminette.registry import Registry +from norminette.errors import HumanizedErrorsFormatter + + +registry = Registry() +test_files = glob.glob("tests/rules/samples/*.[ch]") + + +@pytest.mark.parametrize("file", test_files) +def test_rule_for_file(file, capsys): + with open(file, "r") as test_file: + file_to_lex = test_file.read() + + with open(f"{file.split('.')[0]}.out") as out_file: + out_content = out_file.read() + + file = File(file, file_to_lex) + lexer = Lexer(file) + context = Context(file, list(lexer), debug=2) + registry.run(context) + errors = HumanizedErrorsFormatter(file, use_colors=False) + print(errors, end='') + captured = capsys.readouterr() + + assert captured.out == out_content diff --git a/tests/rules/samples/check_attributes.c 
b/tests/rules/samples/check_attributes.c new file mode 100644 index 00000000..f80bce15 --- /dev/null +++ b/tests/rules/samples/check_attributes.c @@ -0,0 +1,14 @@ +void fatal(void) __attribute__((noreturn)); + +extern int ft_printf(void *obj, const char *format, ...) + __attribute__ ((format (printf, 2, 3))); + +float __attribute__((overloadable)) len(t_float2 a); + +float len(t_float2 a) __attribute__((overloadable)) +{ + t_float2 v; + + v = a; + return (sqrt(a.x * a.x + a.y * a.y)); +} diff --git a/tests/rules/samples/check_attributes.out b/tests/rules/samples/check_attributes.out new file mode 100644 index 00000000..942c53f8 --- /dev/null +++ b/tests/rules/samples/check_attributes.out @@ -0,0 +1,31 @@ +check_attributes.c - IsFuncPrototype In "GlobalScope" from "None" line 1": + +check_attributes.c - IsEmptyLine In "GlobalScope" from "None" line 2": + +check_attributes.c - IsFuncPrototype In "GlobalScope" from "None" line 3": + + +check_attributes.c - IsEmptyLine In "GlobalScope" from "None" line 5": + +check_attributes.c - IsFuncPrototype In "GlobalScope" from "None" line 6": + +check_attributes.c - IsEmptyLine In "GlobalScope" from "None" line 7": + +check_attributes.c - IsFuncDeclaration In "GlobalScope" from "None" line 8": + +check_attributes.c - IsBlockStart In "Function" from "GlobalScope" line 9": + +check_attributes.c - IsVarDeclaration In "Function" from "GlobalScope" line 10": + +check_attributes.c - IsEmptyLine In "Function" from "GlobalScope" line 11": + +check_attributes.c - IsAssignation In "Function" from "GlobalScope" line 12": + +check_attributes.c - IsExpressionStatement In "Function" from "GlobalScope" line 13": + +check_attributes.c - IsBlockEnd In "Function" from "GlobalScope" line 14": + +check_attributes.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: ATTR_EOL (line: 6, col: 7): Function attribute must be at the end of line +Error: MISALIGNED_FUNC_DECL (line: 6, col: 37): Misaligned function declaration diff --git a/tests/rules/samples/check_preprocessor_define.c b/tests/rules/samples/check_preprocessor_define.c new file mode 100644 index 00000000..9a342eb5 --- /dev/null +++ b/tests/rules/samples/check_preprocessor_define.c @@ -0,0 +1,35 @@ +#define lower_case_macro 1 +#define UPPER_CASE_MACRO 2 +#define PascalCaseMacro 3 + +#define EXTRA_SPACING 0 +#define IlikeTabs 1 +#define WEIRD_SPACING 2 + + +#define HOJEEHDIA \ + 1 /* wat */ + +#define HOJEEHDIA2 "5 do 7 de 2023" + +#define PERGUNTAMO + +#define NUNTI PERGUNTAMO // NADA + +#define NON_CONSTANT 1 + 2 +#define AHN 1 1 1 1 1 1 1 1 1 1 1 + +#define BLA (1) + +#define BLO + 2) + +#define NO_SYMBOLS ? +#define NO_SYMBOLS2 && + +#define ABS(x) ((x) < 0 ? 
-(x) : (x)) +#define NOT(o) !o + +#define OK + +#define B 'A' +#define B 'a' \ No newline at end of file diff --git a/tests/rules/samples/check_preprocessor_define.out b/tests/rules/samples/check_preprocessor_define.out new file mode 100644 index 00000000..a3415a4a --- /dev/null +++ b/tests/rules/samples/check_preprocessor_define.out @@ -0,0 +1,91 @@ +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": + +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": + +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 3": + +check_preprocessor_define.c - IsEmptyLine In "GlobalScope" from "None" line 4": + +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 5": + +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 6": + +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 7": + +check_preprocessor_define.c - IsEmptyLine In "GlobalScope" from "None" line 8": + +check_preprocessor_define.c - IsEmptyLine In "GlobalScope" from "None" line 9": + +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 10": + +check_preprocessor_define.c - IsEmptyLine In "GlobalScope" from "None" line 12": + +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 13": + +check_preprocessor_define.c - IsEmptyLine In "GlobalScope" from "None" line 14": + +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 15": + +check_preprocessor_define.c - IsEmptyLine In "GlobalScope" from "None" line 16": + +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 17": + +check_preprocessor_define.c - IsEmptyLine In "GlobalScope" from "None" line 18": + +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 19": + +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 20": + +check_preprocessor_define.c - IsEmptyLine In "GlobalScope" from "None" line 21": + +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 22": + +check_preprocessor_define.c - IsEmptyLine In "GlobalScope" from "None" line 23": + +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 24": + +check_preprocessor_define.c - IsEmptyLine In "GlobalScope" from "None" line 25": + +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 26": + +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 27": + +check_preprocessor_define.c - IsEmptyLine In "GlobalScope" from "None" line 28": + +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 29": + +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 30": + +check_preprocessor_define.c - IsEmptyLine In "GlobalScope" from "None" line 31": + +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 32": + +check_preprocessor_define.c - IsEmptyLine In "GlobalScope" from "None" line 33": + +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 34": + +check_preprocessor_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 35": + +check_preprocessor_define.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: MACRO_NAME_CAPITAL (line: 1, col: 9): Macro name must be capitalized +Error: MACRO_NAME_CAPITAL (line: 3, col: 9): Macro name must be capitalized +Error: CONSECUTIVE_WS (line: 5, col: 8): Two or more consecutives white spaces +Error: CONSECUTIVE_WS (line: 6, col: 8): Two or more consecutives white spaces +Error: TAB_REPLACE_SPACE (line: 6, col: 8): Found tab when expecting space +Error: MACRO_NAME_CAPITAL (line: 6, col: 13): Macro name must be capitalized +Error: CONSECUTIVE_WS (line: 7, col: 8): Two or more consecutives white spaces +Error: TAB_REPLACE_SPACE (line: 7, col: 9): Found tab when expecting space +Error: CONSECUTIVE_NEWLINES (line: 9, col: 1): Consecutive newlines +Error: PREPROC_CONSTANT (line: 19, col: 24): Preprocessor statement must only contain constant defines +Error: PREPROC_CONSTANT (line: 20, col: 15): Preprocessor statement must only contain constant defines +Error: PREPROC_CONSTANT (line: 22, col: 13): Preprocessor statement must only contain constant defines +Error: PREPROC_CONSTANT (line: 24, col: 16): Preprocessor statement must only contain constant defines +Error: PREPROC_CONSTANT (line: 26, col: 20): Preprocessor statement must only contain constant defines +Error: TERNARY_FBIDDEN (line: 26, col: 20): Ternaries are forbidden +Error: PREPROC_CONSTANT (line: 27, col: 21): Preprocessor statement must only contain constant defines +Error: MACRO_FUNC_FORBIDDEN (line: 29, col: 12): Macro functions are forbidden +Error: PREPROC_CONSTANT (line: 29, col: 16): Preprocessor statement must only contain constant defines +Error: TERNARY_FBIDDEN (line: 29, col: 25): Ternaries are forbidden +Error: MACRO_FUNC_FORBIDDEN (line: 30, col: 12): Macro functions are forbidden +Error: PREPROC_CONSTANT (line: 30, col: 25): Preprocessor statement must only contain constant defines diff --git a/tests/rules/samples/check_preprocessor_include.c b/tests/rules/samples/check_preprocessor_include.c new file mode 100644 index 00000000..d6dd5197 --- /dev/null +++ b/tests/rules/samples/check_preprocessor_include.c @@ -0,0 +1,22 @@ +#include "ok.h" +#include "error.c" +#include "error" + + +void main(void); + +#include "not in start.h" + +#if 1 +# include "ok but not ok.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include diff --git a/tests/rules/samples/check_preprocessor_include.out b/tests/rules/samples/check_preprocessor_include.out new file mode 100644 index 00000000..3962a0d2 --- /dev/null +++ b/tests/rules/samples/check_preprocessor_include.out @@ -0,0 +1,60 @@ +check_preprocessor_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": + +check_preprocessor_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": + +check_preprocessor_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 3": + +check_preprocessor_include.c - IsEmptyLine In "GlobalScope" from "None" line 4": + +check_preprocessor_include.c - IsEmptyLine In "GlobalScope" from "None" line 5": + +check_preprocessor_include.c - IsFuncPrototype In "GlobalScope" from "None" line 6": + +check_preprocessor_include.c - IsEmptyLine In "GlobalScope" from "None" line 7": + +check_preprocessor_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 8": + +check_preprocessor_include.c - IsEmptyLine In "GlobalScope" from "None" line 9": + +check_preprocessor_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 10": + 
+check_preprocessor_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 11": + +check_preprocessor_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 12": + +check_preprocessor_include.c - IsEmptyLine In "GlobalScope" from "None" line 13": + +check_preprocessor_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 14": + +check_preprocessor_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 15": + +check_preprocessor_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 16": + +check_preprocessor_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 17": + +check_preprocessor_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 18": + +check_preprocessor_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 19": + +check_preprocessor_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 20": + +check_preprocessor_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 21": + +check_preprocessor_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 22": + +check_preprocessor_include.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: INCLUDE_HEADER_ONLY (line: 2, col: 10): .c file includes are forbidden +Error: INCLUDE_HEADER_ONLY (line: 3, col: 10): .c file includes are forbidden +Error: CONSECUTIVE_NEWLINES (line: 5, col: 1): Consecutive newlines +Error: INCLUDE_START_FILE (line: 8, col: 1): Include must be at the start of file +Error: INCLUDE_START_FILE (line: 11, col: 1): Include must be at the start of file +Error: INCLUDE_START_FILE (line: 14, col: 1): Include must be at the start of file +Error: INCLUDE_START_FILE (line: 15, col: 1): Include must be at the start of file +Error: INCLUDE_START_FILE (line: 16, col: 1): Include must be at the start of file +Error: INCLUDE_START_FILE (line: 17, col: 1): Include must be at the start of file +Error: INCLUDE_START_FILE (line: 18, col: 1): Include must be at the start of file +Error: INCLUDE_START_FILE (line: 19, col: 1): Include must be at the start of file +Error: INCLUDE_START_FILE (line: 20, col: 1): Include must be at the start of file +Error: INCLUDE_START_FILE (line: 21, col: 1): Include must be at the start of file +Error: INCLUDE_START_FILE (line: 22, col: 1): Include must be at the start of file diff --git a/tests/rules/samples/check_preprocessor_indent.c b/tests/rules/samples/check_preprocessor_indent.c new file mode 100644 index 00000000..76c5face --- /dev/null +++ b/tests/rules/samples/check_preprocessor_indent.c @@ -0,0 +1,74 @@ +# define HOJEEHDIA 1 +#ifdef HOJEEHDIA +#define BADIDENT 2 +#if 1 +# error "Ta errado essa indentação aqui hein" +# endif +# endif + +# define bad 1 +// #define toto f(x) +// #define A AB+AB + +#define PRINTF printf // ALIAS + +#define X 2 /* +o X é 2, logo 2 não é X +*/ + +#define NEVER 2 +#if 1 +# if 1 +# if 1 +# endif +# endif +#elif HOJEEHDIA +# +#endif + + #define J 2 + #define NATHAN "lINDO" + #define PANSUDINHO + #define barrigudinho PANSUDINHO + +#include +#include +#include "stdio.h" +#include "stdlib.h" +#include "stdio.h" +#include"stdlib.h" +#include "sys/type.h" +#include +#include < sys_ali / aqui _123_2 .h> + +#include // uint32_t +#include /* malloc */ +#include /* +printf +*/ // é o negócio n ta legal não + +#import +#import // pipoca é bem gostoso + +{ +// We are in GlobalScope here, right? 
+#define OK + +} + +int main(void) +{ +#ifdef ONLY_GLOBAL_SCOPE + return (1); +#else + return (0); +#endif +} + +#define X +#if 'A' == 'A' +#endif +#if 'A' == 65 +#endif +#if 'A' +#endif \ No newline at end of file diff --git a/tests/rules/samples/check_preprocessor_indent.out b/tests/rules/samples/check_preprocessor_indent.out new file mode 100644 index 00000000..808c11ca --- /dev/null +++ b/tests/rules/samples/check_preprocessor_indent.out @@ -0,0 +1,173 @@ +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 3": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 4": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 5": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 6": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 7": + +check_preprocessor_indent.c - IsEmptyLine In "GlobalScope" from "None" line 8": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 9": + +check_preprocessor_indent.c - IsComment In "GlobalScope" from "None" line 10": + +check_preprocessor_indent.c - IsComment In "GlobalScope" from "None" line 11": + +check_preprocessor_indent.c - IsEmptyLine In "GlobalScope" from "None" line 12": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 13": + +check_preprocessor_indent.c - IsEmptyLine In "GlobalScope" from "None" line 14": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 15": + +check_preprocessor_indent.c - IsEmptyLine In "GlobalScope" from "None" line 18": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 19": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 20": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 21": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 22": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 23": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 24": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 25": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 26": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 27": + +check_preprocessor_indent.c - IsEmptyLine In "GlobalScope" from "None" line 28": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 29": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 30": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 31": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 32": + +check_preprocessor_indent.c - IsEmptyLine In "GlobalScope" from "None" line 33": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 34": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 35": + +check_preprocessor_indent.c - 
IsPreprocessorStatement In "GlobalScope" from "None" line 36": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 37": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 38": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 39": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 40": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 41": +
+check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 42": +
+check_preprocessor_indent.c - IsEmptyLine In "GlobalScope" from "None" line 43": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 44": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 45": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 46": + +check_preprocessor_indent.c - IsEmptyLine In "GlobalScope" from "None" line 49": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 50": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 51": + +check_preprocessor_indent.c - IsEmptyLine In "GlobalScope" from "None" line 52": + +check_preprocessor_indent.c - IsBlockStart In "GlobalScope" from "None" line 53": + +check_preprocessor_indent.c - IsComment In "GlobalScope" from "None" line 54": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 55": + +check_preprocessor_indent.c - IsEmptyLine In "GlobalScope" from "None" line 56": + +check_preprocessor_indent.c - IsFuncDeclaration In "GlobalScope" from "None" line 57": + + + +check_preprocessor_indent.c - IsBlockStart In "Function" from "GlobalScope" line 60": + +check_preprocessor_indent.c - IsPreprocessorStatement In "Function" from "GlobalScope" line 61": + +check_preprocessor_indent.c - IsExpressionStatement In "Function" from "GlobalScope" line 62": + +check_preprocessor_indent.c - IsPreprocessorStatement In "Function" from "GlobalScope" line 63": + +check_preprocessor_indent.c - IsExpressionStatement In "Function" from "GlobalScope" line 64": + +check_preprocessor_indent.c - IsPreprocessorStatement In "Function" from "GlobalScope" line 65": + +check_preprocessor_indent.c - IsBlockEnd In "Function" from "GlobalScope" line 66": + +check_preprocessor_indent.c - IsEmptyLine In "GlobalScope" from "None" line 67": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 68": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 69": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 70": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 71": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 72": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 73": + +check_preprocessor_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 74": + +check_preprocessor_indent.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: TOO_MANY_WS (line: 1, col: 1): Extra whitespaces for indent level +Error: PREPROC_BAD_INDENT (line: 3, col: 1): Bad preprocessor indentation +Error: PREPROC_BAD_INDENT (line: 4, col: 1): Bad preprocessor indentation +Error: PREPROC_BAD_INDENT (line: 5, col: 1): Bad preprocessor indentation +Error: TOO_MANY_WS (line: 6, col: 1): Extra whitespaces for indent level +Error: TOO_MANY_WS (line: 7, col: 1): Extra whitespaces for indent level +Error: TAB_REPLACE_SPACE (line: 7, col: 2): Found tab when expecting space +Error: TOO_MANY_WS (line: 9, col: 1): Extra whitespaces for indent level +Error: MACRO_NAME_CAPITAL (line: 9, col: 10): Macro name must be capitalized +Error: PREPROC_START_LINE (line: 29, col: 17): Preprocessor statement not at the beginning of the line +Error: PREPROC_START_LINE (line: 30, col: 5): Preprocessor statement not at the beginning of the line +Error: PREPROC_START_LINE (line: 31, col: 5): Preprocessor statement not at the beginning of the line +Error: PREPROC_START_LINE (line: 32, col: 3): Preprocessor statement not at the beginning of the line +Error: MACRO_NAME_CAPITAL (line: 32, col: 11): Macro name must be capitalized +Error: PREPROC_NO_SPACE (line: 34, col: 9): Missing space after preprocessor directive +Error: TAB_REPLACE_SPACE (line: 35, col: 9): Found tab when expecting space +Error: CONSECUTIVE_WS (line: 36, col: 9): Two or more consecutives white spaces +Error: CONSECUTIVE_WS (line: 37, col: 9): Two or more consecutives white spaces +Error: TAB_REPLACE_SPACE (line: 37, col: 11): Found tab when expecting space +Error: CONSECUTIVE_WS (line: 38, col: 9): Two or more consecutives white spaces +Error: TAB_REPLACE_SPACE (line: 38, col: 10): Found tab when expecting space +Error: PREPROC_NO_SPACE (line: 39, col: 9): Missing space after preprocessor directive +Error: TAB_REPLACE_SPACE (line: 50, col: 8): Found tab when expecting space +Error: PREPOC_ONLY_GLOBAL (line: 61, col: 1): Preprocessor statements are only allowed in the global scope +Error: NL_AFTER_PREPROC (line: 62, col: 1): Preprocessor statement must be followed by a newline +Error: PREPOC_ONLY_GLOBAL (line: 63, col: 1): Preprocessor statements are only allowed in the global scope +Error: NL_AFTER_PREPROC (line: 64, col: 1): Preprocessor statement must be followed by a newline +Error: PREPOC_ONLY_GLOBAL (line: 65, col: 1): Preprocessor statements are only allowed in the global scope +Error: NL_AFTER_PREPROC (line: 66, col: 1): Preprocessor statement must be followed by a newline diff --git a/tests/rules/samples/check_preprocessor_indent_2.c b/tests/rules/samples/check_preprocessor_indent_2.c new file mode 100644 index 00000000..c05a83ef --- /dev/null +++ b/tests/rules/samples/check_preprocessor_indent_2.c @@ -0,0 +1,16 @@ +#endif +#else +#endif +#else +#endif +#endif +#endif +#else +#else +#endif +// Ok +#if 1 +#endif +// END Ok +#else +#endif diff --git a/tests/rules/samples/check_preprocessor_indent_2.out b/tests/rules/samples/check_preprocessor_indent_2.out new file mode 100644 index 00000000..b370e7b4 --- /dev/null +++ b/tests/rules/samples/check_preprocessor_indent_2.out @@ -0,0 +1,46 @@ +check_preprocessor_indent_2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": + +check_preprocessor_indent_2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": + +check_preprocessor_indent_2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 3": + +check_preprocessor_indent_2.c - 
IsPreprocessorStatement In "GlobalScope" from "None" line 4": + +check_preprocessor_indent_2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 5": + +check_preprocessor_indent_2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 6": + +check_preprocessor_indent_2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 7": + +check_preprocessor_indent_2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 8": + +check_preprocessor_indent_2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 9": + +check_preprocessor_indent_2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 10": + +check_preprocessor_indent_2.c - IsComment In "GlobalScope" from "None" line 11": + +check_preprocessor_indent_2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 12": + +check_preprocessor_indent_2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 13": + +check_preprocessor_indent_2.c - IsComment In "GlobalScope" from "None" line 14": + +check_preprocessor_indent_2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 15": + +check_preprocessor_indent_2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 16": + +check_preprocessor_indent_2.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: PREPROC_BAD_ENDIF (line: 1, col: 1): Endif preprocessor statement without if, elif or else +Error: PREPROC_BAD_ELSE (line: 2, col: 1): Else preprocessor statement without if or elif +Error: PREPROC_BAD_ENDIF (line: 3, col: 1): Endif preprocessor statement without if, elif or else +Error: PREPROC_BAD_ELSE (line: 4, col: 1): Else preprocessor statement without if or elif +Error: PREPROC_BAD_ENDIF (line: 5, col: 1): Endif preprocessor statement without if, elif or else +Error: PREPROC_BAD_ENDIF (line: 6, col: 1): Endif preprocessor statement without if, elif or else +Error: PREPROC_BAD_ENDIF (line: 7, col: 1): Endif preprocessor statement without if, elif or else +Error: PREPROC_BAD_ELSE (line: 8, col: 1): Else preprocessor statement without if or elif +Error: PREPROC_BAD_ELSE (line: 9, col: 1): Else preprocessor statement without if or elif +Error: PREPROC_BAD_ENDIF (line: 10, col: 1): Endif preprocessor statement without if, elif or else +Error: PREPROC_BAD_ELSE (line: 15, col: 1): Else preprocessor statement without if or elif +Error: PREPROC_BAD_ENDIF (line: 16, col: 1): Endif preprocessor statement without if, elif or else diff --git a/tests/rules/samples/check_preprocessor_protection.h b/tests/rules/samples/check_preprocessor_protection.h new file mode 100644 index 00000000..f4d57e74 --- /dev/null +++ b/tests/rules/samples/check_preprocessor_protection.h @@ -0,0 +1,16 @@ +#define X 1 + +#ifndef check_preprocessor_protection_h +# define CHECK_PREPROCESSOR_PROTECTION_H +# +#endif + +#define Y 2 + +#ifndef check_preprocessor_protection_h +# define CHECK_PREPROCESSOR_PROTECTION_H +#endif + +#ifndef check_preprocessor_protection_h +# define CHECK_PREPROCESSOR_PROTECTION_H +#endif diff --git a/tests/rules/samples/check_preprocessor_protection.out b/tests/rules/samples/check_preprocessor_protection.out new file mode 100644 index 00000000..4f9c7f5f --- /dev/null +++ b/tests/rules/samples/check_preprocessor_protection.out @@ -0,0 +1,39 @@ +check_preprocessor_protection.h - IsPreprocessorStatement In "GlobalScope" from "None" line 1": + +check_preprocessor_protection.h - IsEmptyLine In "GlobalScope" from "None" line 2": + +check_preprocessor_protection.h - IsPreprocessorStatement In "GlobalScope" 
from "None" line 3": + +check_preprocessor_protection.h - IsPreprocessorStatement In "GlobalScope" from "None" line 4": + +check_preprocessor_protection.h - IsPreprocessorStatement In "GlobalScope" from "None" line 5": + +check_preprocessor_protection.h - IsPreprocessorStatement In "GlobalScope" from "None" line 6": + +check_preprocessor_protection.h - IsEmptyLine In "GlobalScope" from "None" line 7": + +check_preprocessor_protection.h - IsPreprocessorStatement In "GlobalScope" from "None" line 8": + +check_preprocessor_protection.h - IsEmptyLine In "GlobalScope" from "None" line 9": + +check_preprocessor_protection.h - IsPreprocessorStatement In "GlobalScope" from "None" line 10": + +check_preprocessor_protection.h - IsPreprocessorStatement In "GlobalScope" from "None" line 11": + +check_preprocessor_protection.h - IsPreprocessorStatement In "GlobalScope" from "None" line 12": + +check_preprocessor_protection.h - IsEmptyLine In "GlobalScope" from "None" line 13": + +check_preprocessor_protection.h - IsPreprocessorStatement In "GlobalScope" from "None" line 14": + +check_preprocessor_protection.h - IsPreprocessorStatement In "GlobalScope" from "None" line 15": + +check_preprocessor_protection.h - IsPreprocessorStatement In "GlobalScope" from "None" line 16": + +check_preprocessor_protection.h: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: HEADER_PROT_ALL (line: 3, col: 1): Header protection must include all the instructions +Error: HEADER_PROT_UPPER (line: 3, col: 9): Header protection must be in uppercase +Error: HEADER_PROT_ALL_AF (line: 8, col: 1): Instructions after header protection are forbidden +Error: HEADER_PROT_MULT (line: 10, col: 1): Multiple header protections, only one is allowed +Error: HEADER_PROT_MULT (line: 14, col: 1): Multiple header protections, only one is allowed diff --git a/tests/rules/samples/check_preprocessor_protection_2.h b/tests/rules/samples/check_preprocessor_protection_2.h new file mode 100644 index 00000000..364b3dca --- /dev/null +++ b/tests/rules/samples/check_preprocessor_protection_2.h @@ -0,0 +1,5 @@ +// Ok +#ifndef CHECK_PREPROCESSOR_PROTECTION_2_H +# define CHECK_PREPROCESSOR_PROTECTION_2_H + +#endif diff --git a/tests/rules/samples/check_preprocessor_protection_2.out b/tests/rules/samples/check_preprocessor_protection_2.out new file mode 100644 index 00000000..9ef73958 --- /dev/null +++ b/tests/rules/samples/check_preprocessor_protection_2.out @@ -0,0 +1,12 @@ +check_preprocessor_protection_2.h - IsComment In "GlobalScope" from "None" line 1": + +check_preprocessor_protection_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 2": + +check_preprocessor_protection_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 3": + +check_preprocessor_protection_2.h - IsEmptyLine In "GlobalScope" from "None" line 4": + +check_preprocessor_protection_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 5": + +check_preprocessor_protection_2.h: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/tests/rules/samples/check_preprocessor_protection_3.h b/tests/rules/samples/check_preprocessor_protection_3.h new file mode 100644 index 00000000..016f454e --- /dev/null +++ b/tests/rules/samples/check_preprocessor_protection_3.h @@ -0,0 +1,6 @@ +#ifndef CHECK_PREPROCESSOR_PROTECTION_3_H +// No #define +#endif + +// Just to check that the #define is not added +#define CHECK_PREPROCESSOR_PROTECTION_3_H diff --git a/tests/rules/samples/check_preprocessor_protection_3.out b/tests/rules/samples/check_preprocessor_protection_3.out new file mode 100644 index 00000000..d226a5e7 --- /dev/null +++ b/tests/rules/samples/check_preprocessor_protection_3.out @@ -0,0 +1,16 @@ +check_preprocessor_protection_3.h - IsPreprocessorStatement In "GlobalScope" from "None" line 1": + +check_preprocessor_protection_3.h - IsComment In "GlobalScope" from "None" line 2": + +check_preprocessor_protection_3.h - IsPreprocessorStatement In "GlobalScope" from "None" line 3": + +check_preprocessor_protection_3.h - IsEmptyLine In "GlobalScope" from "None" line 4": + +check_preprocessor_protection_3.h - IsComment In "GlobalScope" from "None" line 5": + +check_preprocessor_protection_3.h - IsPreprocessorStatement In "GlobalScope" from "None" line 6": + +check_preprocessor_protection_3.h: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: HEADER_PROT_NODEF (line: 3, col: 1): Header protection not containing #define +Error: HEADER_PROT_ALL_AF (line: 6, col: 1): Instructions after header protection are forbidden diff --git a/tests/rules/samples/check_utype_declaration_1.c b/tests/rules/samples/check_utype_declaration_1.c new file mode 100644 index 00000000..850375e0 --- /dev/null +++ b/tests/rules/samples/check_utype_declaration_1.c @@ -0,0 +1,34 @@ +struct +{ + int a; +}; + +void main(void) +{ + struct + { + int b; + }; +} + +// https://github.com/42School/norminette/issues/437 +enum e_endian +{ + LITTLE, + BIG +}; + +enum e_endian which_endian(void) +{ + union + { + unsigned char var2[2]; + unsigned short var1; + } u_endian;// This should be the correct indentation +// } u_endian; + u_endian.var1 = 1; + if (u_endian.var1 == u_endian.var2[0]) + return (LITTLE); + else + return (BIG); +} diff --git a/tests/rules/samples/check_utype_declaration_1.out b/tests/rules/samples/check_utype_declaration_1.out new file mode 100644 index 00000000..a6d12367 --- /dev/null +++ b/tests/rules/samples/check_utype_declaration_1.out @@ -0,0 +1,81 @@ +check_utype_declaration_1.c - IsUserDefinedType In "GlobalScope" from "None" line 1": + +check_utype_declaration_1.c - IsBlockStart In "UserDefinedType" from "GlobalScope" line 2": + +check_utype_declaration_1.c - IsVarDeclaration In "UserDefinedType" from "GlobalScope" line 3": + +check_utype_declaration_1.c - IsBlockEnd In "UserDefinedType" from "GlobalScope" line 4": + +check_utype_declaration_1.c - IsEmptyLine In "GlobalScope" from "None" line 5": + +check_utype_declaration_1.c - IsFuncDeclaration In "GlobalScope" from "None" line 6": + +check_utype_declaration_1.c - IsBlockStart In "Function" from "GlobalScope" line 7": + +check_utype_declaration_1.c - IsUserDefinedType In "Function" from "GlobalScope" line 8": + +check_utype_declaration_1.c - IsBlockStart In "UserDefinedType" from "Function" line 9": + +check_utype_declaration_1.c - IsVarDeclaration In "UserDefinedType" from "Function" line 10": + +check_utype_declaration_1.c - IsBlockEnd In "UserDefinedType" from "Function" 
line 11": + +check_utype_declaration_1.c - IsBlockEnd In "Function" from "GlobalScope" line 12": + +check_utype_declaration_1.c - IsEmptyLine In "GlobalScope" from "None" line 13": + +check_utype_declaration_1.c - IsComment In "GlobalScope" from "None" line 14": + +check_utype_declaration_1.c - IsUserDefinedType In "GlobalScope" from "None" line 15": + +check_utype_declaration_1.c - IsBlockStart In "UserDefinedEnum" from "GlobalScope" line 16": + +check_utype_declaration_1.c - IsEnumVarDecl In "UserDefinedEnum" from "GlobalScope" line 17": + +check_utype_declaration_1.c - IsEnumVarDecl In "UserDefinedEnum" from "GlobalScope" line 18": + +check_utype_declaration_1.c - IsBlockEnd In "UserDefinedEnum" from "GlobalScope" line 19": + +check_utype_declaration_1.c - IsEmptyLine In "GlobalScope" from "None" line 20": + +check_utype_declaration_1.c - IsFuncDeclaration In "GlobalScope" from "None" line 21": + +check_utype_declaration_1.c - IsBlockStart In "Function" from "GlobalScope" line 22": + +check_utype_declaration_1.c - IsUserDefinedType In "Function" from "GlobalScope" line 23": + +check_utype_declaration_1.c - IsBlockStart In "UserDefinedType" from "Function" line 24": + +check_utype_declaration_1.c - IsVarDeclaration In "UserDefinedType" from "Function" line 25": + +check_utype_declaration_1.c - IsVarDeclaration In "UserDefinedType" from "Function" line 26": + +check_utype_declaration_1.c - IsBlockEnd In "UserDefinedType" from "Function" line 27": + +check_utype_declaration_1.c - IsComment In "Function" from "GlobalScope" line 27": + +check_utype_declaration_1.c - IsComment In "Function" from "GlobalScope" line 28": + +check_utype_declaration_1.c - IsAssignation In "Function" from "GlobalScope" line 29": + +check_utype_declaration_1.c - IsControlStatement In "Function" from "GlobalScope" line 30": + +check_utype_declaration_1.c - IsExpressionStatement In "ControlStructure" from "Function" line 31": + +check_utype_declaration_1.c - IsControlStatement In "Function" from "GlobalScope" line 32": + +check_utype_declaration_1.c - IsExpressionStatement In "ControlStructure" from "Function" line 33": + +check_utype_declaration_1.c - IsBlockEnd In "Function" from "GlobalScope" line 34": + +check_utype_declaration_1.c: Error! 
+Error: FORBIDDEN_STRUCT (line: 1, col: 1): Struct declaration are not allowed in .c files +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: FORBIDDEN_STRUCT (line: 8, col: 5): Struct declaration are not allowed in .c files +Error: TYPE_NOT_GLOBAL (line: 8, col: 5): Enums, structs and unions need to be defined only in global scope +Error: FORBIDDEN_ENUM (line: 15, col: 1): Enum declaration are not allowed in .c files +Error: FORBIDDEN_UNION (line: 23, col: 5): Union declaration are not allowed in .c files +Error: TYPE_NOT_GLOBAL (line: 23, col: 5): Enums, structs and unions need to be defined only in global scope +Error: WRONG_SCOPE_COMMENT (line: 27, col: 18): Comment is invalid in this scope +Error: WRONG_SCOPE_COMMENT (line: 27, col: 18): Comment is invalid in this scope +Error: WRONG_SCOPE_COMMENT (line: 28, col: 1): Comment is invalid in this scope diff --git a/tests/rules/samples/check_utype_declaration_2.h b/tests/rules/samples/check_utype_declaration_2.h new file mode 100644 index 00000000..4dcb59af --- /dev/null +++ b/tests/rules/samples/check_utype_declaration_2.h @@ -0,0 +1,12 @@ +struct +{ + int a; +}; + +struct +{ + struct + { + int b; + } s_a; +}; diff --git a/tests/rules/samples/check_utype_declaration_2.out b/tests/rules/samples/check_utype_declaration_2.out new file mode 100644 index 00000000..bbdb5617 --- /dev/null +++ b/tests/rules/samples/check_utype_declaration_2.out @@ -0,0 +1,26 @@ +check_utype_declaration_2.h - IsUserDefinedType In "GlobalScope" from "None" line 1": + +check_utype_declaration_2.h - IsBlockStart In "UserDefinedType" from "GlobalScope" line 2": + +check_utype_declaration_2.h - IsVarDeclaration In "UserDefinedType" from "GlobalScope" line 3": + +check_utype_declaration_2.h - IsBlockEnd In "UserDefinedType" from "GlobalScope" line 4": + +check_utype_declaration_2.h - IsEmptyLine In "GlobalScope" from "None" line 5": + +check_utype_declaration_2.h - IsUserDefinedType In "GlobalScope" from "None" line 6": + +check_utype_declaration_2.h - IsBlockStart In "UserDefinedType" from "GlobalScope" line 7": + +check_utype_declaration_2.h - IsUserDefinedType In "UserDefinedType" from "GlobalScope" line 8": + +check_utype_declaration_2.h - IsBlockStart In "UserDefinedType" from "UserDefinedType" line 9": + +check_utype_declaration_2.h - IsVarDeclaration In "UserDefinedType" from "UserDefinedType" line 10": + +check_utype_declaration_2.h - IsBlockEnd In "UserDefinedType" from "UserDefinedType" line 11": + +check_utype_declaration_2.h - IsBlockEnd In "UserDefinedType" from "GlobalScope" line 12": + +check_utype_declaration_2.h: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/tests/rules/samples/glued_operators.c b/tests/rules/samples/glued_operators.c new file mode 100644 index 00000000..e64c6eb3 --- /dev/null +++ b/tests/rules/samples/glued_operators.c @@ -0,0 +1,21 @@ +int main(void) +{ + int i; + int *j; + int k; + + k = 0; + j = &k; + i = -!j; + i = -!*j; + i = -!!j; + i = ~*j; + i = ~!j; + i = ~!j; + i = -~k; + i = !-k; + i = !+k; + i = !~k; + i = ~!~k; + i = ~~~~~~~~k; +} diff --git a/tests/rules/samples/glued_operators.out b/tests/rules/samples/glued_operators.out new file mode 100644 index 00000000..524281f9 --- /dev/null +++ b/tests/rules/samples/glued_operators.out @@ -0,0 +1,44 @@ +glued_operators.c - IsFuncDeclaration In "GlobalScope" from "None" line 1": + +glued_operators.c - IsBlockStart In "Function" from "GlobalScope" line 2": + +glued_operators.c - IsVarDeclaration In "Function" from "GlobalScope" line 3": + +glued_operators.c - IsVarDeclaration In "Function" from "GlobalScope" line 4": + +glued_operators.c - IsVarDeclaration In "Function" from "GlobalScope" line 5": + +glued_operators.c - IsEmptyLine In "Function" from "GlobalScope" line 6": + +glued_operators.c - IsAssignation In "Function" from "GlobalScope" line 7": + +glued_operators.c - IsAssignation In "Function" from "GlobalScope" line 8": + +glued_operators.c - IsAssignation In "Function" from "GlobalScope" line 9": + +glued_operators.c - IsAssignation In "Function" from "GlobalScope" line 10": + +glued_operators.c - IsAssignation In "Function" from "GlobalScope" line 11": + +glued_operators.c - IsAssignation In "Function" from "GlobalScope" line 12": + +glued_operators.c - IsAssignation In "Function" from "GlobalScope" line 13": + +glued_operators.c - IsAssignation In "Function" from "GlobalScope" line 14": + +glued_operators.c - IsAssignation In "Function" from "GlobalScope" line 15": + +glued_operators.c - IsAssignation In "Function" from "GlobalScope" line 16": + +glued_operators.c - IsAssignation In "Function" from "GlobalScope" line 17": + +glued_operators.c - IsAssignation In "Function" from "GlobalScope" line 18": + +glued_operators.c - IsAssignation In "Function" from "GlobalScope" line 19": + +glued_operators.c - IsAssignation In "Function" from "GlobalScope" line 20": + +glued_operators.c - IsBlockEnd In "Function" from "GlobalScope" line 21": + +glued_operators.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/integer_constants.c b/tests/rules/samples/integer_constants.c similarity index 80% rename from norminette/tests/rules/integer_constants.c rename to tests/rules/samples/integer_constants.c index 64f34bf5..4ee6402d 100644 --- a/norminette/tests/rules/integer_constants.c +++ b/tests/rules/samples/integer_constants.c @@ -25,3 +25,16 @@ int main(void) __int64 hex_i64 = 0x4a44000000000020I64; unsigned __int64 hex_ui64 = 0x8a44000000000040Ui64; } + +int main(long long i, long long int j, long int k, short short l, short short int m, short int n, long o, short p) +{ + long long i; + long long int j; + long int k; + short short l; + short short int m; + short int n; + long o; + short p; +} + diff --git a/tests/rules/samples/integer_constants.out b/tests/rules/samples/integer_constants.out new file mode 100644 index 00000000..24b4b9ad --- /dev/null +++ b/tests/rules/samples/integer_constants.out @@ -0,0 +1,131 @@ +integer_constants.c - IsFuncDeclaration In "GlobalScope" from "None" line 1": + +integer_constants.c - IsBlockStart In "Function" from "GlobalScope" line 2": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 3": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 4": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 5": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 6": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 7": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 8": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 9": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 10": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 11": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 12": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 13": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 14": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 15": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 16": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 17": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 18": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 19": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 20": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 21": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 22": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 23": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 24": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 25": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 26": + +integer_constants.c - IsBlockEnd In "Function" from "GlobalScope" line 27": + +integer_constants.c - IsEmptyLine In "GlobalScope" from "None" line 28": + +integer_constants.c - IsFuncDeclaration In "GlobalScope" from "None" line 29": + +integer_constants.c - IsBlockStart In "Function" from "GlobalScope" line 30": + 
+integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 31": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 32": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 33": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 34": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 35": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 36": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 37": + +integer_constants.c - IsVarDeclaration In "Function" from "GlobalScope" line 38": + +integer_constants.c - IsBlockEnd In "Function" from "GlobalScope" line 39": + +integer_constants.c - IsEmptyLine In "GlobalScope" from "None" line 40": + +integer_constants.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: DECL_ASSIGN_LINE (line: 3, col: 33): Declaration and assignation on a single line +Error: DECL_ASSIGN_LINE (line: 4, col: 34): Declaration and assignation on a single line +Error: DECL_ASSIGN_LINE (line: 5, col: 34): Declaration and assignation on a single line +Error: DECL_ASSIGN_LINE (line: 6, col: 35): Declaration and assignation on a single line +Error: DECL_ASSIGN_LINE (line: 7, col: 35): Declaration and assignation on a single line +Error: TOO_MANY_VARS_FUNC (line: 8, col: 1): Too many variables declarations in a function +Error: DECL_ASSIGN_LINE (line: 8, col: 36): Declaration and assignation on a single line +Error: TOO_MANY_VARS_FUNC (line: 9, col: 1): Too many variables declarations in a function +Error: DECL_ASSIGN_LINE (line: 9, col: 33): Declaration and assignation on a single line +Error: TOO_MANY_VARS_FUNC (line: 10, col: 1): Too many variables declarations in a function +Error: DECL_ASSIGN_LINE (line: 10, col: 34): Declaration and assignation on a single line +Error: TOO_MANY_VARS_FUNC (line: 11, col: 1): Too many variables declarations in a function +Error: DECL_ASSIGN_LINE (line: 11, col: 33): Declaration and assignation on a single line +Error: TOO_MANY_VARS_FUNC (line: 12, col: 1): Too many variables declarations in a function +Error: DECL_ASSIGN_LINE (line: 12, col: 34): Declaration and assignation on a single line +Error: TOO_MANY_VARS_FUNC (line: 13, col: 1): Too many variables declarations in a function +Error: DECL_ASSIGN_LINE (line: 13, col: 34): Declaration and assignation on a single line +Error: TOO_MANY_VARS_FUNC (line: 14, col: 1): Too many variables declarations in a function +Error: DECL_ASSIGN_LINE (line: 14, col: 35): Declaration and assignation on a single line +Error: TOO_MANY_VARS_FUNC (line: 15, col: 1): Too many variables declarations in a function +Error: DECL_ASSIGN_LINE (line: 15, col: 35): Declaration and assignation on a single line +Error: TOO_MANY_VARS_FUNC (line: 16, col: 1): Too many variables declarations in a function +Error: DECL_ASSIGN_LINE (line: 16, col: 36): Declaration and assignation on a single line +Error: TOO_MANY_VARS_FUNC (line: 17, col: 1): Too many variables declarations in a function +Error: DECL_ASSIGN_LINE (line: 17, col: 33): Declaration and assignation on a single line +Error: TOO_MANY_VARS_FUNC (line: 18, col: 1): Too many variables declarations in a function +Error: DECL_ASSIGN_LINE (line: 18, col: 34): Declaration and assignation on a single line +Error: TOO_MANY_VARS_FUNC (line: 19, col: 1): Too many variables declarations in a function +Error: DECL_ASSIGN_LINE (line: 19, col: 33): 
Declaration and assignation on a single line +Error: TOO_MANY_VARS_FUNC (line: 20, col: 1): Too many variables declarations in a function +Error: DECL_ASSIGN_LINE (line: 20, col: 34): Declaration and assignation on a single line +Error: TOO_MANY_VARS_FUNC (line: 21, col: 1): Too many variables declarations in a function +Error: DECL_ASSIGN_LINE (line: 21, col: 34): Declaration and assignation on a single line +Error: TOO_MANY_VARS_FUNC (line: 22, col: 1): Too many variables declarations in a function +Error: DECL_ASSIGN_LINE (line: 22, col: 35): Declaration and assignation on a single line +Error: TOO_MANY_VARS_FUNC (line: 23, col: 1): Too many variables declarations in a function +Error: DECL_ASSIGN_LINE (line: 23, col: 35): Declaration and assignation on a single line +Error: TOO_MANY_VARS_FUNC (line: 24, col: 1): Too many variables declarations in a function +Error: DECL_ASSIGN_LINE (line: 24, col: 36): Declaration and assignation on a single line +Error: TOO_MANY_VARS_FUNC (line: 25, col: 1): Too many variables declarations in a function +Error: DECL_ASSIGN_LINE (line: 25, col: 33): Declaration and assignation on a single line +Error: TOO_MANY_VARS_FUNC (line: 26, col: 1): Too many variables declarations in a function +Error: DECL_ASSIGN_LINE (line: 26, col: 34): Declaration and assignation on a single line +Error: LINE_TOO_LONG (line: 29, col: 82): line too long +Error: TOO_MANY_ARGS (line: 29, col: 115): Function has more than 4 arguments +Error: TOO_MANY_VARS_FUNC (line: 36, col: 1): Too many variables declarations in a function +Error: TOO_MANY_VARS_FUNC (line: 37, col: 1): Too many variables declarations in a function +Error: TOO_MANY_VARS_FUNC (line: 38, col: 1): Too many variables declarations in a function +Error: EMPTY_LINE_EOF (line: 40, col: 1): Empty line at end of file diff --git a/norminette/tests/rules/ko_func_name.c b/tests/rules/samples/ko_func_name.c similarity index 100% rename from norminette/tests/rules/ko_func_name.c rename to tests/rules/samples/ko_func_name.c diff --git a/norminette/tests/rules/ko_func_name.out b/tests/rules/samples/ko_func_name.out similarity index 84% rename from norminette/tests/rules/ko_func_name.out rename to tests/rules/samples/ko_func_name.out index 7a683f55..1ac20115 100644 --- a/norminette/tests/rules/ko_func_name.out +++ b/tests/rules/samples/ko_func_name.out @@ -4,4 +4,5 @@ ko_func_name.c - IsFuncPrototype In "GlobalScope" from "None" line 3": -ko_func_name.c: OK! +ko_func_name.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/tests/rules/samples/ko_func_name2.c b/tests/rules/samples/ko_func_name2.c new file mode 100644 index 00000000..c0610acb --- /dev/null +++ b/tests/rules/samples/ko_func_name2.c @@ -0,0 +1,21 @@ +int main (void) +{ + return (42); +} + +int main (void) +{ + return (21); +} + +int main (void) +{ + int array[] = {1, 2, 3, 4, 5}; + + return (0); +} + +int _1 a b c(void) +{ + return ; +} diff --git a/tests/rules/samples/ko_func_name2.out b/tests/rules/samples/ko_func_name2.out new file mode 100644 index 00000000..0c1bf040 --- /dev/null +++ b/tests/rules/samples/ko_func_name2.out @@ -0,0 +1,55 @@ +ko_func_name2.c - IsFuncDeclaration In "GlobalScope" from "None" line 1": + +ko_func_name2.c - IsBlockStart In "Function" from "GlobalScope" line 2": + +ko_func_name2.c - IsExpressionStatement In "Function" from "GlobalScope" line 3": + +ko_func_name2.c - IsBlockEnd In "Function" from "GlobalScope" line 4": + +ko_func_name2.c - IsEmptyLine In "GlobalScope" from "None" line 5": + +ko_func_name2.c - IsFuncDeclaration In "GlobalScope" from "None" line 6": + +ko_func_name2.c - IsBlockStart In "Function" from "GlobalScope" line 7": + +ko_func_name2.c - IsExpressionStatement In "Function" from "GlobalScope" line 8": + +ko_func_name2.c - IsBlockEnd In "Function" from "GlobalScope" line 9": + +ko_func_name2.c - IsEmptyLine In "GlobalScope" from "None" line 10": + +ko_func_name2.c - IsFuncDeclaration In "GlobalScope" from "None" line 11": + +ko_func_name2.c - IsBlockStart In "Function" from "GlobalScope" line 12": + +ko_func_name2.c - IsVarDeclaration In "Function" from "GlobalScope" line 13": + +ko_func_name2.c - IsEmptyLine In "Function" from "GlobalScope" line 14": + +ko_func_name2.c - IsExpressionStatement In "Function" from "GlobalScope" line 15": + +ko_func_name2.c - IsBlockEnd In "Function" from "GlobalScope" line 16": + +ko_func_name2.c - IsEmptyLine In "GlobalScope" from "None" line 17": + +ko_func_name2.c - IsFuncDeclaration In "GlobalScope" from "None" line 18": + +ko_func_name2.c - IsBlockStart In "Function" from "GlobalScope" line 19": + +ko_func_name2.c - IsExpressionStatement In "Function" from "GlobalScope" line 20": + +ko_func_name2.c - IsBlockEnd In "Function" from "GlobalScope" line 21": + +ko_func_name2.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: EXP_PARENTHESIS (line: 1, col: 9): Expected parenthesis +Error: EXP_PARENTHESIS (line: 6, col: 9): Expected parenthesis +Error: SPACE_BEFORE_FUNC (line: 11, col: 4): Found space when expecting tab before function name +Error: EXP_PARENTHESIS (line: 11, col: 9): Expected parenthesis +Error: TOO_FEW_TAB (line: 13, col: 1): Missing tabs for indent level +Error: SPACE_REPLACE_TAB (line: 13, col: 5): Found space when expecting tab +Error: SPACE_REPLACE_TAB (line: 13, col: 8): Found space when expecting tab +Error: DECL_ASSIGN_LINE (line: 13, col: 17): Declaration and assignation on a single line +Error: TOO_FEW_TAB (line: 15, col: 1): Missing tabs for indent level +Error: SPACE_REPLACE_TAB (line: 15, col: 5): Found space when expecting tab +Error: SPACE_BEFORE_FUNC (line: 18, col: 11): Found space when expecting tab before function name diff --git a/norminette/tests/rules/ko_include.c b/tests/rules/samples/ko_include.c similarity index 100% rename from norminette/tests/rules/ko_include.c rename to tests/rules/samples/ko_include.c diff --git a/tests/rules/samples/ko_include.out b/tests/rules/samples/ko_include.out new file mode 100644 index 00000000..c3ba1d79 --- /dev/null +++ b/tests/rules/samples/ko_include.out @@ -0,0 +1,11 @@ +ko_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": + +ko_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": + +ko_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 3": + +ko_include.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: INCLUDE_HEADER_ONLY (line: 1, col: 10): .c file includes are forbidden +Error: MACRO_NAME_CAPITAL (line: 2, col: 9): Macro name must be capitalized +Error: INCLUDE_HEADER_ONLY (line: 3, col: 10): .c file includes are forbidden diff --git a/tests/rules/samples/ko_include2.c b/tests/rules/samples/ko_include2.c new file mode 100644 index 00000000..b081dc79 --- /dev/null +++ b/tests/rules/samples/ko_include2.c @@ -0,0 +1,6 @@ +#include"libft.h" +#include "libft.h" +#include "libft.h" +#include +#include +#include diff --git a/tests/rules/samples/ko_include2.out b/tests/rules/samples/ko_include2.out new file mode 100644 index 00000000..899f65c6 --- /dev/null +++ b/tests/rules/samples/ko_include2.out @@ -0,0 +1,22 @@ +ko_include2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": + +ko_include2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": + +ko_include2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 3": + +ko_include2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 4": + +ko_include2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 5": + +ko_include2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 6": + +ko_include2.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: PREPROC_NO_SPACE (line: 1, col: 9): Missing space after preprocessor directive +Error: TAB_REPLACE_SPACE (line: 2, col: 9): Found tab when expecting space +Error: CONSECUTIVE_WS (line: 3, col: 9): Two or more consecutives white spaces +Error: TAB_REPLACE_SPACE (line: 3, col: 9): Found tab when expecting space +Error: PREPROC_NO_SPACE (line: 4, col: 9): Missing space after preprocessor directive +Error: TAB_REPLACE_SPACE (line: 5, col: 9): Found tab when expecting space +Error: CONSECUTIVE_WS (line: 6, col: 9): Two or more consecutives white spaces +Error: TAB_REPLACE_SPACE (line: 6, col: 9): Found tab when expecting space diff --git a/norminette/tests/rules/ko_pointer1.c b/tests/rules/samples/ko_pointer1.c similarity index 100% rename from norminette/tests/rules/ko_pointer1.c rename to tests/rules/samples/ko_pointer1.c diff --git a/norminette/tests/rules/ko_pointer1.out b/tests/rules/samples/ko_pointer1.out similarity index 62% rename from norminette/tests/rules/ko_pointer1.out rename to tests/rules/samples/ko_pointer1.out index 2b7b4b63..2f44e275 100644 --- a/norminette/tests/rules/ko_pointer1.out +++ b/tests/rules/samples/ko_pointer1.out @@ -62,38 +62,39 @@ ko_pointer1.c - IsBlockEnd In "Function" from "GlobalScope" line 23": -ko_pointer1.c: KO! - BRACE_NEWLINE (line: 1, col: 16): Expected newline before brace - SPC_AFTER_POINTER (line: 2, col: 11): space after pointer - SPC_AFTER_POINTER (line: 2, col: 12): space after pointer - BRACE_NEWLINE (line: 5, col: 16): Expected newline before brace - SPC_AFTER_POINTER (line: 6, col: 11): space after pointer - SPC_AFTER_POINTER (line: 6, col: 13): space after pointer - NL_AFTER_VAR_DECL (line: 7, col: 21): Variable declarations must be followed by a newline - TOO_FEW_TAB (line: 7, col: 21): Missing tabs for indent level - TOO_MANY_INSTR (line: 7, col: 21): Too many instructions on a single line - NO_SPC_AFR_PAR (line: 7, col: 38): Extra space after parenthesis (brace/bracket) - NO_SPC_AFR_PAR (line: 7, col: 40): Extra space after parenthesis (brace/bracket) - NO_SPC_BFR_PAR (line: 7, col: 40): Extra space before parenthesis (brace/bracket) - NO_SPC_AFR_PAR (line: 7, col: 42): Extra space after parenthesis (brace/bracket) - NO_SPC_BFR_PAR (line: 7, col: 42): Extra space before parenthesis (brace/bracket) - NO_SPC_BFR_PAR (line: 7, col: 44): Extra space before parenthesis (brace/bracket) - BRACE_NEWLINE (line: 10, col: 16): Expected newline before brace - SPC_AFTER_POINTER (line: 11, col: 11): space after pointer - SPC_AFTER_POINTER (line: 11, col: 13): space after pointer - NL_AFTER_VAR_DECL (line: 12, col: 21): Variable declarations must be followed by a newline - TOO_FEW_TAB (line: 12, col: 21): Missing tabs for indent level - TOO_MANY_INSTR (line: 12, col: 21): Too many instructions on a single line - BRACE_NEWLINE (line: 15, col: 16): Expected newline before brace - SPC_AFTER_POINTER (line: 16, col: 11): space after pointer - SPC_AFTER_POINTER (line: 16, col: 13): space after pointer - NL_AFTER_VAR_DECL (line: 17, col: 21): Variable declarations must be followed by a newline - TOO_FEW_TAB (line: 17, col: 21): Missing tabs for indent level - TOO_MANY_INSTR (line: 17, col: 21): Too many instructions on a single line - BRACE_NEWLINE (line: 20, col: 16): Expected newline before brace - SPC_AFTER_POINTER (line: 21, col: 11): space after pointer - SPC_AFTER_POINTER (line: 21, col: 13): space after pointer - NL_AFTER_VAR_DECL (line: 22, col: 21): Variable declarations 
must be followed by a newline - TOO_FEW_TAB (line: 22, col: 21): Missing tabs for indent level - TOO_MANY_INSTR (line: 22, col: 21): Too many instructions on a single line - BRACE_SHOULD_EOL (line: 23, col: 1): Expected newline after brace +ko_pointer1.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: BRACE_NEWLINE (line: 1, col: 16): Expected newline before brace +Error: SPC_AFTER_POINTER (line: 2, col: 11): space after pointer +Error: SPC_AFTER_POINTER (line: 2, col: 12): space after pointer +Error: BRACE_NEWLINE (line: 5, col: 16): Expected newline before brace +Error: SPC_AFTER_POINTER (line: 6, col: 11): space after pointer +Error: SPC_AFTER_POINTER (line: 6, col: 13): space after pointer +Error: NL_AFTER_VAR_DECL (line: 7, col: 21): Variable declarations must be followed by a newline +Error: TOO_FEW_TAB (line: 7, col: 21): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 7, col: 21): Too many instructions on a single line +Error: NO_SPC_AFR_PAR (line: 7, col: 38): Extra space after parenthesis (brace/bracket) +Error: NO_SPC_AFR_PAR (line: 7, col: 40): Extra space after parenthesis (brace/bracket) +Error: NO_SPC_BFR_PAR (line: 7, col: 40): Extra space before parenthesis (brace/bracket) +Error: NO_SPC_AFR_PAR (line: 7, col: 42): Extra space after parenthesis (brace/bracket) +Error: NO_SPC_BFR_PAR (line: 7, col: 42): Extra space before parenthesis (brace/bracket) +Error: NO_SPC_BFR_PAR (line: 7, col: 44): Extra space before parenthesis (brace/bracket) +Error: BRACE_NEWLINE (line: 10, col: 16): Expected newline before brace +Error: SPC_AFTER_POINTER (line: 11, col: 11): space after pointer +Error: SPC_AFTER_POINTER (line: 11, col: 13): space after pointer +Error: NL_AFTER_VAR_DECL (line: 12, col: 21): Variable declarations must be followed by a newline +Error: TOO_FEW_TAB (line: 12, col: 21): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 12, col: 21): Too many instructions on a single line +Error: BRACE_NEWLINE (line: 15, col: 16): Expected newline before brace +Error: SPC_AFTER_POINTER (line: 16, col: 11): space after pointer +Error: SPC_AFTER_POINTER (line: 16, col: 13): space after pointer +Error: NL_AFTER_VAR_DECL (line: 17, col: 21): Variable declarations must be followed by a newline +Error: TOO_FEW_TAB (line: 17, col: 21): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 17, col: 21): Too many instructions on a single line +Error: BRACE_NEWLINE (line: 20, col: 16): Expected newline before brace +Error: SPC_AFTER_POINTER (line: 21, col: 11): space after pointer +Error: SPC_AFTER_POINTER (line: 21, col: 13): space after pointer +Error: NL_AFTER_VAR_DECL (line: 22, col: 21): Variable declarations must be followed by a newline +Error: TOO_FEW_TAB (line: 22, col: 21): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 22, col: 21): Too many instructions on a single line +Error: BRACE_SHOULD_EOL (line: 23, col: 1): Expected newline after brace diff --git a/norminette/tests/rules/ko_pointer2.c b/tests/rules/samples/ko_pointer2.c similarity index 100% rename from norminette/tests/rules/ko_pointer2.c rename to tests/rules/samples/ko_pointer2.c diff --git a/norminette/tests/rules/ko_pointer2.out b/tests/rules/samples/ko_pointer2.out similarity index 63% rename from norminette/tests/rules/ko_pointer2.out rename to tests/rules/samples/ko_pointer2.out index 0a069e0a..102f2e82 100644 --- a/norminette/tests/rules/ko_pointer2.out +++ b/tests/rules/samples/ko_pointer2.out @@ -49,7 +49,7 @@ ko_pointer2.c - 
IsAssignation In "ControlStructure" from "Function" line 20": -ko_pointer2.c - IsDeclaration In "ControlStructure" from "Function" line 21": +ko_pointer2.c - IsVarDeclaration In "ControlStructure" from "Function" line 21": ko_pointer2.c - IsBlockEnd In "ControlStructure" from "Function" line 23": @@ -92,45 +92,55 @@ ko_pointer2.c - IsBlockEnd In "Function" from "GlobalScope" line 37": -ko_pointer2.c: KO! - BRACE_NEWLINE (line: 1, col: 16): Expected newline before brace - SPC_AFTER_POINTER (line: 2, col: 11): space after pointer - SPC_AFTER_POINTER (line: 2, col: 13): space after pointer - NL_AFTER_VAR_DECL (line: 3, col: 21): Variable declarations must be followed by a newline - TOO_FEW_TAB (line: 3, col: 21): Missing tabs for indent level - TOO_MANY_INSTR (line: 3, col: 21): Too many instructions on a single line - BRACE_NEWLINE (line: 6, col: 16): Expected newline before brace - SPC_AFTER_POINTER (line: 7, col: 11): space after pointer - SPC_AFTER_POINTER (line: 7, col: 13): space after pointer - NL_AFTER_VAR_DECL (line: 8, col: 21): Variable declarations must be followed by a newline - TOO_FEW_TAB (line: 8, col: 21): Missing tabs for indent level - TOO_MANY_INSTR (line: 8, col: 21): Too many instructions on a single line - TAB_INSTEAD_SPC (line: 8, col: 24): Found tab when expecting space - TAB_INSTEAD_SPC (line: 10, col: 13): Found tab when expecting space - BRACE_NEWLINE (line: 16, col: 16): Expected newline before brace - SPC_AFTER_POINTER (line: 17, col: 11): space after pointer - SPC_AFTER_POINTER (line: 17, col: 13): space after pointer - NL_AFTER_VAR_DECL (line: 18, col: 21): Variable declarations must be followed by a newline - TOO_FEW_TAB (line: 18, col: 21): Missing tabs for indent level - TOO_MANY_INSTR (line: 18, col: 21): Too many instructions on a single line - TAB_INSTEAD_SPC (line: 18, col: 24): Found tab when expecting space - TAB_INSTEAD_SPC (line: 20, col: 13): Found tab when expecting space - BRACE_NEWLINE (line: 26, col: 16): Expected newline before brace - SPC_AFTER_POINTER (line: 27, col: 11): space after pointer - SPC_AFTER_POINTER (line: 27, col: 13): space after pointer - NL_AFTER_VAR_DECL (line: 28, col: 21): Variable declarations must be followed by a newline - TOO_FEW_TAB (line: 28, col: 21): Missing tabs for indent level - TOO_MANY_INSTR (line: 28, col: 21): Too many instructions on a single line - TAB_INSTEAD_SPC (line: 28, col: 24): Found tab when expecting space - NO_SPC_AFR_PAR (line: 28, col: 58): Extra space after parenthesis (brace/bracket) - TOO_FEW_TAB (line: 28, col: 60): Missing tabs for indent level - TAB_INSTEAD_SPC (line: 29, col: 13): Found tab when expecting space - BRACE_NEWLINE (line: 33, col: 16): Expected newline before brace - SPC_AFTER_POINTER (line: 34, col: 11): space after pointer - SPC_AFTER_POINTER (line: 34, col: 13): space after pointer - NL_AFTER_VAR_DECL (line: 35, col: 21): Variable declarations must be followed by a newline - TOO_FEW_TAB (line: 35, col: 21): Missing tabs for indent level - TOO_MANY_INSTR (line: 35, col: 21): Too many instructions on a single line - TAB_INSTEAD_SPC (line: 35, col: 24): Found tab when expecting space - SPC_AFTER_POINTER (line: 35, col: 56): space after pointer - TAB_INSTEAD_SPC (line: 36, col: 13): Found tab when expecting space +ko_pointer2.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: BRACE_NEWLINE (line: 1, col: 16): Expected newline before brace +Error: SPC_AFTER_POINTER (line: 2, col: 11): space after pointer +Error: SPC_AFTER_POINTER (line: 2, col: 13): space after pointer +Error: NL_AFTER_VAR_DECL (line: 3, col: 21): Variable declarations must be followed by a newline +Error: TOO_FEW_TAB (line: 3, col: 21): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 3, col: 21): Too many instructions on a single line +Error: BRACE_NEWLINE (line: 6, col: 16): Expected newline before brace +Error: SPC_AFTER_POINTER (line: 7, col: 11): space after pointer +Error: SPC_AFTER_POINTER (line: 7, col: 13): space after pointer +Error: NL_AFTER_VAR_DECL (line: 8, col: 21): Variable declarations must be followed by a newline +Error: TOO_FEW_TAB (line: 8, col: 21): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 8, col: 21): Too many instructions on a single line +Error: MIXED_SPACE_TAB (line: 8, col: 23): Mixed spaces and tabs +Error: TAB_INSTEAD_SPC (line: 8, col: 24): Found tab when expecting space +Error: MIXED_SPACE_TAB (line: 10, col: 12): Mixed spaces and tabs +Error: TAB_INSTEAD_SPC (line: 10, col: 13): Found tab when expecting space +Error: BRACE_NEWLINE (line: 16, col: 16): Expected newline before brace +Error: SPC_AFTER_POINTER (line: 17, col: 11): space after pointer +Error: SPC_AFTER_POINTER (line: 17, col: 13): space after pointer +Error: NL_AFTER_VAR_DECL (line: 18, col: 21): Variable declarations must be followed by a newline +Error: TOO_FEW_TAB (line: 18, col: 21): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 18, col: 21): Too many instructions on a single line +Error: MIXED_SPACE_TAB (line: 18, col: 23): Mixed spaces and tabs +Error: TAB_INSTEAD_SPC (line: 18, col: 24): Found tab when expecting space +Error: MIXED_SPACE_TAB (line: 20, col: 12): Mixed spaces and tabs +Error: TAB_INSTEAD_SPC (line: 20, col: 13): Found tab when expecting space +Error: WRONG_SCOPE_VAR (line: 21, col: 1): Variable declared in incorrect scope +Error: BRACE_NEWLINE (line: 26, col: 16): Expected newline before brace +Error: SPC_AFTER_POINTER (line: 27, col: 11): space after pointer +Error: SPC_AFTER_POINTER (line: 27, col: 13): space after pointer +Error: NL_AFTER_VAR_DECL (line: 28, col: 21): Variable declarations must be followed by a newline +Error: TOO_FEW_TAB (line: 28, col: 21): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 28, col: 21): Too many instructions on a single line +Error: MIXED_SPACE_TAB (line: 28, col: 23): Mixed spaces and tabs +Error: TAB_INSTEAD_SPC (line: 28, col: 24): Found tab when expecting space +Error: NO_SPC_AFR_PAR (line: 28, col: 58): Extra space after parenthesis (brace/bracket) +Error: TOO_FEW_TAB (line: 28, col: 60): Missing tabs for indent level +Error: MIXED_SPACE_TAB (line: 29, col: 12): Mixed spaces and tabs +Error: TAB_INSTEAD_SPC (line: 29, col: 13): Found tab when expecting space +Error: BRACE_NEWLINE (line: 33, col: 16): Expected newline before brace +Error: SPC_AFTER_POINTER (line: 34, col: 11): space after pointer +Error: SPC_AFTER_POINTER (line: 34, col: 13): space after pointer +Error: NL_AFTER_VAR_DECL (line: 35, col: 21): Variable declarations must be followed by a newline +Error: TOO_FEW_TAB (line: 35, col: 21): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 35, col: 21): Too many instructions on a single line +Error: MIXED_SPACE_TAB (line: 35, col: 23): Mixed spaces and tabs +Error: TAB_INSTEAD_SPC (line: 
35, col: 24): Found tab when expecting space +Error: SPC_AFTER_POINTER (line: 35, col: 56): space after pointer +Error: MIXED_SPACE_TAB (line: 36, col: 12): Mixed spaces and tabs +Error: TAB_INSTEAD_SPC (line: 36, col: 13): Found tab when expecting space diff --git a/norminette/tests/rules/ko_pointer3.c b/tests/rules/samples/ko_pointer3.c similarity index 100% rename from norminette/tests/rules/ko_pointer3.c rename to tests/rules/samples/ko_pointer3.c diff --git a/norminette/tests/rules/ko_pointer3.out b/tests/rules/samples/ko_pointer3.out similarity index 55% rename from norminette/tests/rules/ko_pointer3.out rename to tests/rules/samples/ko_pointer3.out index 47acb68c..01e132cc 100644 --- a/norminette/tests/rules/ko_pointer3.out +++ b/tests/rules/samples/ko_pointer3.out @@ -57,44 +57,52 @@ ko_pointer3.c - IsBlockEnd In "Function" from "GlobalScope" line 23": -ko_pointer3.c: KO! - BRACE_NEWLINE (line: 1, col: 16): Expected newline before brace - SPC_AFTER_POINTER (line: 2, col: 11): space after pointer - SPC_AFTER_POINTER (line: 2, col: 13): space after pointer - NL_AFTER_VAR_DECL (line: 3, col: 21): Variable declarations must be followed by a newline - TOO_FEW_TAB (line: 3, col: 21): Missing tabs for indent level - TOO_MANY_INSTR (line: 3, col: 21): Too many instructions on a single line - TAB_INSTEAD_SPC (line: 3, col: 24): Found tab when expecting space - SPC_AFTER_POINTER (line: 3, col: 56): space after pointer - SPC_AFTER_POINTER (line: 3, col: 57): space after pointer - TAB_INSTEAD_SPC (line: 4, col: 13): Found tab when expecting space - BRACE_NEWLINE (line: 7, col: 16): Expected newline before brace - SPC_AFTER_POINTER (line: 8, col: 11): space after pointer - SPC_AFTER_POINTER (line: 8, col: 13): space after pointer - NL_AFTER_VAR_DECL (line: 9, col: 21): Variable declarations must be followed by a newline - TOO_FEW_TAB (line: 9, col: 21): Missing tabs for indent level - TOO_MANY_INSTR (line: 9, col: 21): Too many instructions on a single line - TAB_INSTEAD_SPC (line: 9, col: 24): Found tab when expecting space - TAB_INSTEAD_SPC (line: 10, col: 13): Found tab when expecting space - BRACE_NEWLINE (line: 13, col: 16): Expected newline before brace - SPC_AFTER_POINTER (line: 14, col: 11): space after pointer - SPC_AFTER_POINTER (line: 14, col: 13): space after pointer - NL_AFTER_VAR_DECL (line: 15, col: 21): Variable declarations must be followed by a newline - TOO_FEW_TAB (line: 15, col: 21): Missing tabs for indent level - TOO_MANY_INSTR (line: 15, col: 21): Too many instructions on a single line - NO_SPC_AFR_PAR (line: 15, col: 38): Extra space after parenthesis (brace/bracket) - NO_SPC_AFR_PAR (line: 15, col: 40): Extra space after parenthesis (brace/bracket) - NO_SPC_BFR_PAR (line: 15, col: 40): Extra space before parenthesis (brace/bracket) - NO_SPC_AFR_PAR (line: 15, col: 42): Extra space after parenthesis (brace/bracket) - NO_SPC_BFR_PAR (line: 15, col: 42): Extra space before parenthesis (brace/bracket) - NO_SPC_BFR_PAR (line: 15, col: 44): Extra space before parenthesis (brace/bracket) - TAB_INSTEAD_SPC (line: 16, col: 9): Found tab when expecting space - NO_SPC_BFR_PAR (line: 16, col: 33): Extra space before parenthesis (brace/bracket) - BRACE_NEWLINE (line: 19, col: 16): Expected newline before brace - TAB_REPLACE_SPACE (line: 20, col: 8): Found tab when expecting space - SPACE_REPLACE_TAB (line: 20, col: 13): Found space when expecting tab - SPC_AFTER_POINTER (line: 21, col: 5): space after pointer - SPC_AFTER_POINTER (line: 21, col: 10): space after pointer - 
SPC_AFTER_POINTER (line: 21, col: 12): space after pointer - SPC_AFTER_POINTER (line: 21, col: 14): space after pointer - BRACE_SHOULD_EOL (line: 23, col: 1): Expected newline after brace +ko_pointer3.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: BRACE_NEWLINE (line: 1, col: 16): Expected newline before brace +Error: SPC_AFTER_POINTER (line: 2, col: 11): space after pointer +Error: SPC_AFTER_POINTER (line: 2, col: 13): space after pointer +Error: NL_AFTER_VAR_DECL (line: 3, col: 21): Variable declarations must be followed by a newline +Error: TOO_FEW_TAB (line: 3, col: 21): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 3, col: 21): Too many instructions on a single line +Error: MIXED_SPACE_TAB (line: 3, col: 23): Mixed spaces and tabs +Error: TAB_INSTEAD_SPC (line: 3, col: 24): Found tab when expecting space +Error: SPC_AFTER_POINTER (line: 3, col: 56): space after pointer +Error: SPC_AFTER_POINTER (line: 3, col: 57): space after pointer +Error: MIXED_SPACE_TAB (line: 4, col: 12): Mixed spaces and tabs +Error: TAB_INSTEAD_SPC (line: 4, col: 13): Found tab when expecting space +Error: BRACE_NEWLINE (line: 7, col: 16): Expected newline before brace +Error: SPC_AFTER_POINTER (line: 8, col: 11): space after pointer +Error: SPC_AFTER_POINTER (line: 8, col: 13): space after pointer +Error: NL_AFTER_VAR_DECL (line: 9, col: 21): Variable declarations must be followed by a newline +Error: TOO_FEW_TAB (line: 9, col: 21): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 9, col: 21): Too many instructions on a single line +Error: MIXED_SPACE_TAB (line: 9, col: 23): Mixed spaces and tabs +Error: TAB_INSTEAD_SPC (line: 9, col: 24): Found tab when expecting space +Error: MIXED_SPACE_TAB (line: 10, col: 12): Mixed spaces and tabs +Error: TAB_INSTEAD_SPC (line: 10, col: 13): Found tab when expecting space +Error: BRACE_NEWLINE (line: 13, col: 16): Expected newline before brace +Error: SPC_AFTER_POINTER (line: 14, col: 11): space after pointer +Error: SPC_AFTER_POINTER (line: 14, col: 13): space after pointer +Error: NL_AFTER_VAR_DECL (line: 15, col: 21): Variable declarations must be followed by a newline +Error: TOO_FEW_TAB (line: 15, col: 21): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 15, col: 21): Too many instructions on a single line +Error: NO_SPC_AFR_PAR (line: 15, col: 38): Extra space after parenthesis (brace/bracket) +Error: NO_SPC_AFR_PAR (line: 15, col: 40): Extra space after parenthesis (brace/bracket) +Error: NO_SPC_BFR_PAR (line: 15, col: 40): Extra space before parenthesis (brace/bracket) +Error: NO_SPC_AFR_PAR (line: 15, col: 42): Extra space after parenthesis (brace/bracket) +Error: NO_SPC_BFR_PAR (line: 15, col: 42): Extra space before parenthesis (brace/bracket) +Error: NO_SPC_BFR_PAR (line: 15, col: 44): Extra space before parenthesis (brace/bracket) +Error: MIXED_SPACE_TAB (line: 16, col: 8): Mixed spaces and tabs +Error: TAB_INSTEAD_SPC (line: 16, col: 9): Found tab when expecting space +Error: NO_SPC_BFR_PAR (line: 16, col: 33): Extra space before parenthesis (brace/bracket) +Error: BRACE_NEWLINE (line: 19, col: 16): Expected newline before brace +Error: TAB_REPLACE_SPACE (line: 20, col: 8): Found tab when expecting space +Error: SPACE_REPLACE_TAB (line: 20, col: 13): Found space when expecting tab +Error: TOO_FEW_TAB (line: 21, col: 1): Missing tabs for indent level +Error: SPC_AFTER_POINTER (line: 21, col: 5): space after pointer +Error: MIXED_SPACE_TAB (line: 21, col: 6): Mixed spaces and tabs 
+Error: SPC_AFTER_POINTER (line: 21, col: 10): space after pointer +Error: SPC_AFTER_POINTER (line: 21, col: 12): space after pointer +Error: SPC_AFTER_POINTER (line: 21, col: 14): space after pointer +Error: BRACE_SHOULD_EOL (line: 23, col: 1): Expected newline after brace diff --git a/norminette/tests/rules/ko_preproc_define.c b/tests/rules/samples/ko_preproc_define.c similarity index 100% rename from norminette/tests/rules/ko_preproc_define.c rename to tests/rules/samples/ko_preproc_define.c diff --git a/tests/rules/samples/ko_preproc_define.out b/tests/rules/samples/ko_preproc_define.out new file mode 100644 index 00000000..bec80219 --- /dev/null +++ b/tests/rules/samples/ko_preproc_define.out @@ -0,0 +1,21 @@ +ko_preproc_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": + +ko_preproc_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": + +ko_preproc_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 3": + +ko_preproc_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 4": + +ko_preproc_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 5": + +ko_preproc_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 6": + +ko_preproc_define.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: MACRO_NAME_CAPITAL (line: 1, col: 9): Macro name must be capitalized +Error: PREPROC_CONSTANT (line: 1, col: 15): Preprocessor statement must only contain constant defines +Error: PREPROC_CONSTANT (line: 2, col: 13): Preprocessor statement must only contain constant defines +Error: PREPROC_CONSTANT (line: 3, col: 22): Preprocessor statement must only contain constant defines +Error: PREPROC_CONSTANT (line: 5, col: 19): Preprocessor statement must only contain constant defines +Error: PREPROC_START_LINE (line: 6, col: 2): Preprocessor statement not at the beginning of the line +Error: MACRO_NAME_CAPITAL (line: 6, col: 10): Macro name must be capitalized diff --git a/norminette/tests/rules/ko_preproc_indent.c b/tests/rules/samples/ko_preproc_indent.c similarity index 100% rename from norminette/tests/rules/ko_preproc_indent.c rename to tests/rules/samples/ko_preproc_indent.c diff --git a/tests/rules/samples/ko_preproc_indent.out b/tests/rules/samples/ko_preproc_indent.out new file mode 100644 index 00000000..0fb199ad --- /dev/null +++ b/tests/rules/samples/ko_preproc_indent.out @@ -0,0 +1,16 @@ +ko_preproc_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": + +ko_preproc_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": + +ko_preproc_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 3": + +ko_preproc_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 4": + +ko_preproc_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 5": + +ko_preproc_indent.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: TOO_MANY_WS (line: 1, col: 1): Extra whitespaces for indent level +Error: PREPROC_BAD_INDENT (line: 3, col: 1): Bad preprocessor indentation +Error: TOO_MANY_WS (line: 4, col: 1): Extra whitespaces for indent level +Error: TOO_MANY_WS (line: 5, col: 1): Extra whitespaces for indent level diff --git a/norminette/tests/rules/ko_struct_indent.c b/tests/rules/samples/ko_struct_indent.c similarity index 100% rename from norminette/tests/rules/ko_struct_indent.c rename to tests/rules/samples/ko_struct_indent.c diff --git a/norminette/tests/rules/ko_struct_indent.out b/tests/rules/samples/ko_struct_indent.out similarity index 68% rename from norminette/tests/rules/ko_struct_indent.out rename to tests/rules/samples/ko_struct_indent.out index 20328322..d5a8140e 100644 --- a/norminette/tests/rules/ko_struct_indent.out +++ b/tests/rules/samples/ko_struct_indent.out @@ -28,11 +28,17 @@ ko_struct_indent.c - IsBlockEnd In "Function" from "GlobalScope" line 15": -ko_struct_indent.c: KO! - GLOBAL_VAR_NAMING (line: 2, col: 29): Global variable must start with g_ - MISALIGNED_VAR_DECL (line: 2, col: 29): Misaligned variable declaration - MISALIGNED_VAR_DECL (line: 3, col: 29): Misaligned variable declaration - MULT_DECL_LINE (line: 3, col: 34): Multiple declarations on a single line - MISALIGNED_VAR_DECL (line: 7, col: 29): Misaligned variable declaration - MISALIGNED_VAR_DECL (line: 8, col: 1): Misaligned variable declaration - MISALIGNED_VAR_DECL (line: 9, col: 29): Misaligned variable declaration +ko_struct_indent.c: Error! +Error: FORBIDDEN_TYPEDEF (line: 1, col: 1): Typedef declaration are not allowed in .c files +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Notice: GLOBAL_VAR_DETECTED (line: 2, col: 1): Global variable present in file. Make sure it is a reasonable choice. +Error: GLOBAL_VAR_NAMING (line: 2, col: 29): Global variable must start with g_ +Error: MISALIGNED_VAR_DECL (line: 2, col: 29): Misaligned variable declaration +Notice: GLOBAL_VAR_DETECTED (line: 3, col: 1): Global variable present in file. Make sure it is a reasonable choice. 
+Error: MISALIGNED_VAR_DECL (line: 3, col: 29): Misaligned variable declaration +Error: MULT_DECL_LINE (line: 3, col: 34): Multiple declarations on a single line +Error: FORBIDDEN_TYPEDEF (line: 5, col: 1): Typedef declaration are not allowed in .c files +Error: BRACE_NEWLINE (line: 5, col: 23): Expected newline before brace +Error: MISALIGNED_VAR_DECL (line: 7, col: 29): Misaligned variable declaration +Error: MISALIGNED_VAR_DECL (line: 8, col: 1): Misaligned variable declaration +Error: MISALIGNED_VAR_DECL (line: 9, col: 29): Misaligned variable declaration diff --git a/norminette/tests/rules/ko_struct_name.c b/tests/rules/samples/ko_struct_name.c similarity index 84% rename from norminette/tests/rules/ko_struct_name.c rename to tests/rules/samples/ko_struct_name.c index e8afb19c..c02d2d6f 100644 --- a/norminette/tests/rules/ko_struct_name.c +++ b/tests/rules/samples/ko_struct_name.c @@ -9,6 +9,11 @@ typedef struct toto { enum g_toto vv; } u_struct; +typedef struct test +{ + int _42; +}t_boom; + int main(void) { return (0); diff --git a/norminette/tests/rules/ko_struct_name.out b/tests/rules/samples/ko_struct_name.out similarity index 54% rename from norminette/tests/rules/ko_struct_name.out rename to tests/rules/samples/ko_struct_name.out index 1ac320de..6154f8c9 100644 --- a/norminette/tests/rules/ko_struct_name.out +++ b/tests/rules/samples/ko_struct_name.out @@ -20,20 +20,35 @@ ko_struct_name.c - IsEmptyLine In "GlobalScope" from "None" line 11": -ko_struct_name.c - IsFuncDeclaration In "GlobalScope" from "None" line 12": +ko_struct_name.c - IsUserDefinedType In "GlobalScope" from "None" line 12": + +ko_struct_name.c - IsBlockStart In "UserDefinedType" from "GlobalScope" line 13": + +ko_struct_name.c - IsVarDeclaration In "UserDefinedType" from "GlobalScope" line 14": + +ko_struct_name.c - IsBlockEnd In "UserDefinedType" from "GlobalScope" line 15": + +ko_struct_name.c - IsEmptyLine In "GlobalScope" from "None" line 16": + +ko_struct_name.c - IsFuncDeclaration In "GlobalScope" from "None" line 17": -ko_struct_name.c - IsBlockStart In "Function" from "GlobalScope" line 13": +ko_struct_name.c - IsBlockStart In "Function" from "GlobalScope" line 18": -ko_struct_name.c - IsExpressionStatement In "Function" from "GlobalScope" line 14": +ko_struct_name.c - IsExpressionStatement In "Function" from "GlobalScope" line 19": -ko_struct_name.c - IsBlockEnd In "Function" from "GlobalScope" line 15": +ko_struct_name.c - IsBlockEnd In "Function" from "GlobalScope" line 20": -ko_struct_name.c: KO! - STRUCT_TYPE_NAMING (line: 1, col: 16): Structure name must start with s_ - USER_DEFINED_TYPEDEF (line: 1, col: 25): User defined typedef must start with t_ - GLOBAL_VAR_NAMING (line: 2, col: 25): Global variable must start with g_ - GLOBAL_VAR_NAMING (line: 3, col: 25): Global variable must start with g_ - STRUCT_TYPE_NAMING (line: 5, col: 16): Structure name must start with s_ - UNION_TYPE_NAMING (line: 8, col: 19): Union name must start with u_ - USER_DEFINED_TYPEDEF (line: 8, col: 25): User defined typedef must start with t_ - USER_DEFINED_TYPEDEF (line: 10, col: 5): User defined typedef must start with t_ +ko_struct_name.c: Error! +Error: FORBIDDEN_TYPEDEF (line: 1, col: 1): Typedef declaration are not allowed in .c files +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: USER_DEFINED_TYPEDEF (line: 1, col: 25): User defined typedef must start with t_ +Notice: GLOBAL_VAR_DETECTED (line: 2, col: 1): Global variable present in file. Make sure it is a reasonable choice. 
+Error: GLOBAL_VAR_NAMING (line: 2, col: 25): Global variable must start with g_ +Notice: GLOBAL_VAR_DETECTED (line: 3, col: 1): Global variable present in file. Make sure it is a reasonable choice. +Error: GLOBAL_VAR_NAMING (line: 3, col: 25): Global variable must start with g_ +Error: FORBIDDEN_TYPEDEF (line: 5, col: 1): Typedef declaration are not allowed in .c files +Error: BRACE_NEWLINE (line: 5, col: 21): Expected newline before brace +Error: USER_DEFINED_TYPEDEF (line: 8, col: 25): User defined typedef must start with t_ +Error: USER_DEFINED_TYPEDEF (line: 10, col: 5): User defined typedef must start with t_ +Error: FORBIDDEN_TYPEDEF (line: 12, col: 1): Typedef declaration are not allowed in .c files +Error: NO_TAB_BF_TYPEDEF (line: 15, col: 2): Missing whitespace before typedef name diff --git a/tests/rules/samples/ko_too_many_arg.c b/tests/rules/samples/ko_too_many_arg.c new file mode 100644 index 00000000..b7ba8d8d --- /dev/null +++ b/tests/rules/samples/ko_too_many_arg.c @@ -0,0 +1,4 @@ +static void test(int (*f)(void *z), int a, int b, int c, int d, int e) +{ + return ; +} diff --git a/tests/rules/samples/ko_too_many_arg.out b/tests/rules/samples/ko_too_many_arg.out new file mode 100644 index 00000000..0631e99b --- /dev/null +++ b/tests/rules/samples/ko_too_many_arg.out @@ -0,0 +1,11 @@ +ko_too_many_arg.c - IsFuncDeclaration In "GlobalScope" from "None" line 1": + +ko_too_many_arg.c - IsBlockStart In "Function" from "GlobalScope" line 2": + +ko_too_many_arg.c - IsExpressionStatement In "Function" from "GlobalScope" line 3": + +ko_too_many_arg.c - IsBlockEnd In "Function" from "GlobalScope" line 4": + +ko_too_many_arg.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: TOO_MANY_ARGS (line: 1, col: 71): Function has more than 4 arguments diff --git a/norminette/tests/rules/ko_too_many_instruction.c b/tests/rules/samples/ko_too_many_instruction.c similarity index 100% rename from norminette/tests/rules/ko_too_many_instruction.c rename to tests/rules/samples/ko_too_many_instruction.c diff --git a/norminette/tests/rules/ko_too_many_instruction.out b/tests/rules/samples/ko_too_many_instruction.out similarity index 65% rename from norminette/tests/rules/ko_too_many_instruction.out rename to tests/rules/samples/ko_too_many_instruction.out index 34f5ee38..b05a3763 100644 --- a/norminette/tests/rules/ko_too_many_instruction.out +++ b/tests/rules/samples/ko_too_many_instruction.out @@ -14,8 +14,11 @@ ko_too_many_instruction.c - IsBlockEnd In "Function" from "GlobalScope" line 6": -ko_too_many_instruction.c: KO! - TOO_MANY_INSTR (line: 1, col: 10): Too many instructions on a single line - MISALIGNED_VAR_DECL (line: 1, col: 17): Misaligned variable declaration - TOO_FEW_TAB (line: 5, col: 23): Missing tabs for indent level - TOO_MANY_INSTR (line: 5, col: 23): Too many instructions on a single line +ko_too_many_instruction.c: Error! +Notice: GLOBAL_VAR_DETECTED (line: 1, col: 1): Global variable present in file. Make sure it is a reasonable choice. +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Notice: GLOBAL_VAR_DETECTED (line: 1, col: 10): Global variable present in file. Make sure it is a reasonable choice. 
+Error: TOO_MANY_INSTR (line: 1, col: 10): Too many instructions on a single line +Error: MISALIGNED_VAR_DECL (line: 1, col: 17): Misaligned variable declaration +Error: TOO_FEW_TAB (line: 5, col: 23): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 5, col: 23): Too many instructions on a single line diff --git a/norminette/tests/rules/ko_var_decl.c b/tests/rules/samples/ko_var_decl.c similarity index 100% rename from norminette/tests/rules/ko_var_decl.c rename to tests/rules/samples/ko_var_decl.c diff --git a/norminette/tests/rules/ko_var_decl.out b/tests/rules/samples/ko_var_decl.out similarity index 93% rename from norminette/tests/rules/ko_var_decl.out rename to tests/rules/samples/ko_var_decl.out index 0fecc7f4..2d58108e 100644 --- a/norminette/tests/rules/ko_var_decl.out +++ b/tests/rules/samples/ko_var_decl.out @@ -36,5 +36,6 @@ ko_var_decl.c - IsBlockEnd In "Function" from "GlobalScope" line 19": -ko_var_decl.c: KO! - MULT_IN_SINGLE_INSTR (line: 9, col: 1): Multiple instructions in single line control structure +ko_var_decl.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: MULT_IN_SINGLE_INSTR (line: 9, col: 1): Multiple instructions in single line control structure diff --git a/norminette/tests/rules/ko_var_name.c b/tests/rules/samples/ko_var_name.c similarity index 100% rename from norminette/tests/rules/ko_var_name.c rename to tests/rules/samples/ko_var_name.c diff --git a/tests/rules/samples/ko_var_name.out b/tests/rules/samples/ko_var_name.out new file mode 100644 index 00000000..9251e40d --- /dev/null +++ b/tests/rules/samples/ko_var_name.out @@ -0,0 +1,14 @@ +ko_var_name.c - IsVarDeclaration In "GlobalScope" from "None" line 1": + +ko_var_name.c - IsVarDeclaration In "GlobalScope" from "None" line 2": + +ko_var_name.c: Error! +Notice: GLOBAL_VAR_DETECTED (line: 1, col: 1): Global variable present in file. Make sure it is a reasonable choice. +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: SPACE_REPLACE_TAB (line: 1, col: 4): Found space when expecting tab +Error: FORBIDDEN_CHAR_NAME (line: 1, col: 5): user defined identifiers should contain only lowercase characters, digits or '_' +Error: GLOBAL_VAR_NAMING (line: 1, col: 5): Global variable must start with g_ +Notice: GLOBAL_VAR_DETECTED (line: 2, col: 1): Global variable present in file. Make sure it is a reasonable choice. 
+Error: SPACE_REPLACE_TAB (line: 2, col: 4): Found space when expecting tab +Error: FORBIDDEN_CHAR_NAME (line: 2, col: 5): user defined identifiers should contain only lowercase characters, digits or '_' +Error: GLOBAL_VAR_NAMING (line: 2, col: 5): Global variable must start with g_ diff --git a/tests/rules/samples/ko_white_space_end_include.c b/tests/rules/samples/ko_white_space_end_include.c new file mode 100644 index 00000000..2da7e861 --- /dev/null +++ b/tests/rules/samples/ko_white_space_end_include.c @@ -0,0 +1,9 @@ +#include "libft.h" +#include "libft.h" + +void main(void) +{ + int i; + + i = 0; +} diff --git a/tests/rules/samples/ko_white_space_end_include.out b/tests/rules/samples/ko_white_space_end_include.out new file mode 100644 index 00000000..2090fd75 --- /dev/null +++ b/tests/rules/samples/ko_white_space_end_include.out @@ -0,0 +1,22 @@ +ko_white_space_end_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": + +ko_white_space_end_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": + +ko_white_space_end_include.c - IsEmptyLine In "GlobalScope" from "None" line 3": + +ko_white_space_end_include.c - IsFuncDeclaration In "GlobalScope" from "None" line 4": + +ko_white_space_end_include.c - IsBlockStart In "Function" from "GlobalScope" line 5": + +ko_white_space_end_include.c - IsVarDeclaration In "Function" from "GlobalScope" line 6": + +ko_white_space_end_include.c - IsEmptyLine In "Function" from "GlobalScope" line 7": + +ko_white_space_end_include.c - IsAssignation In "Function" from "GlobalScope" line 8": + +ko_white_space_end_include.c - IsBlockEnd In "Function" from "GlobalScope" line 9": + +ko_white_space_end_include.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: SPC_BEFORE_NL (line: 5, col: 2): Space before newline +Error: SPC_BEFORE_NL (line: 9, col: 2): Space before newline diff --git a/tests/rules/samples/ko_white_space_eof.c b/tests/rules/samples/ko_white_space_eof.c new file mode 100644 index 00000000..0cf5a3c6 --- /dev/null +++ b/tests/rules/samples/ko_white_space_eof.c @@ -0,0 +1,7 @@ +int main(void) +{ + int i; + + i = 0; +} + \ No newline at end of file diff --git a/tests/rules/samples/ko_white_space_eof.out b/tests/rules/samples/ko_white_space_eof.out new file mode 100644 index 00000000..5a9e221c --- /dev/null +++ b/tests/rules/samples/ko_white_space_eof.out @@ -0,0 +1,17 @@ +ko_white_space_eof.c - IsFuncDeclaration In "GlobalScope" from "None" line 1": + +ko_white_space_eof.c - IsBlockStart In "Function" from "GlobalScope" line 2": + +ko_white_space_eof.c - IsVarDeclaration In "Function" from "GlobalScope" line 3": + +ko_white_space_eof.c - IsEmptyLine In "Function" from "GlobalScope" line 4": + +ko_white_space_eof.c - IsAssignation In "Function" from "GlobalScope" line 5": + +ko_white_space_eof.c - IsBlockEnd In "Function" from "GlobalScope" line 6": + +ko_white_space_eof.c - IsEmptyLine In "GlobalScope" from "None" line 7": + +ko_white_space_eof.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: SPACE_EMPTY_LINE (line: 7, col: 1): Space on empty line diff --git a/norminette/tests/rules/long_test.c b/tests/rules/samples/long_test.c similarity index 100% rename from norminette/tests/rules/long_test.c rename to tests/rules/samples/long_test.c diff --git a/norminette/tests/rules/long_test.out b/tests/rules/samples/long_test.out similarity index 68% rename from norminette/tests/rules/long_test.out rename to tests/rules/samples/long_test.out index ef7e59bc..cb9eeebe 100644 --- a/norminette/tests/rules/long_test.out +++ b/tests/rules/samples/long_test.out @@ -37,7 +37,7 @@ long_test.c - IsEmptyLine In "GlobalScope" from "None" line 16": long_test.c - IsPreprocessorStatement In "GlobalScope" from "None" line 17": - > + long_test.c - IsFuncDeclaration In "GlobalScope" from "None" line 18": long_test.c - IsBlockStart In "Function" from "GlobalScope" line 19": @@ -142,61 +142,70 @@ long_test.c - IsFunctionCall In "GlobalScope" from "None" line 63": -long_test.c: KO! - TOO_MANY_TAB (line: 4, col: 1): Extra tabs for indent level - EMPTY_LINE_FUNCTION (line: 5, col: 1): Empty line in function - SPACE_EMPTY_LINE (line: 5, col: 1): Space on empty line - TOO_FEW_TAB (line: 6, col: 1): Missing tabs for indent level - SPACE_BEFORE_FUNC (line: 10, col: 10): space before function name - NO_ARGS_VOID (line: 10, col: 13): Empty function argument requires void - BRACE_NEWLINE (line: 10, col: 15): Expected newline before brace - BRACE_SHOULD_EOL (line: 10, col: 16): Expected newline after brace - TOO_FEW_TAB (line: 10, col: 17): Missing tabs for indent level - TOO_MANY_INSTR (line: 10, col: 17): Too many instructions on a single line - RETURN_PARENTHESIS (line: 10, col: 24): Return value must be in parenthesis - TOO_MANY_INSTR (line: 10, col: 27): Too many instructions on a single line - SPACE_BEFORE_FUNC (line: 12, col: 4): space before function name - RETURN_PARENTHESIS (line: 14, col: 12): Return value must be in parenthesis - INCLUDE_START_FILE (line: 17, col: 1): Include must be at the start of file - NO_ARGS_VOID (line: 18, col: 11): Empty function argument requires void - RETURN_PARENTHESIS (line: 20, col: 12): Return value must be in parenthesis - SPACE_BEFORE_FUNC (line: 23, col: 4): space before function name - MISSING_IDENTIFIER (line: 23, col: 17): missing type qualifier or identifier in function arguments - BRACE_NEWLINE (line: 23, col: 22): Expected newline before brace - RETURN_PARENTHESIS (line: 24, col: 12): Return value must be in parenthesis - TOO_MANY_INSTR (line: 24, col: 17): Too many instructions on a single line - TOO_MANY_FUNCS (line: 26, col: 1): Too many functions in file - MISSING_IDENTIFIER (line: 26, col: 29): missing type qualifier or identifier in function arguments - TOO_MANY_FUNCS (line: 31, col: 1): Too many functions in file - MISSING_IDENTIFIER (line: 31, col: 34): missing type qualifier or identifier in function arguments - BRACE_NEWLINE (line: 31, col: 39): Expected newline before brace - BRACE_SHOULD_EOL (line: 31, col: 40): Expected newline after brace - TOO_FEW_TAB (line: 31, col: 41): Missing tabs for indent level - TOO_MANY_INSTR (line: 31, col: 41): Too many instructions on a single line - RETURN_PARENTHESIS (line: 31, col: 48): Return value must be in parenthesis - TOO_MANY_INSTR (line: 31, col: 52): Too many instructions on a single line - GLOBAL_VAR_NAMING (line: 33, col: 7): Global variable must start with g_ - GLOBAL_VAR_NAMING (line: 34, col: 9): Global variable must start with 
g_ - MISALIGNED_VAR_DECL (line: 34, col: 19): Misaligned variable declaration - SPACE_REPLACE_TAB (line: 38, col: 4): Found space when expecting tab - GLOBAL_VAR_NAMING (line: 38, col: 8): Global variable must start with g_ - NO_SPC_AFR_PAR (line: 38, col: 17): Extra space after parenthesis (brace/bracket) - SPACE_REPLACE_TAB (line: 39, col: 4): Found space when expecting tab - EXP_PARENTHESIS (line: 39, col: 10): Expected parenthesis - ENUM_TYPE_NAMING (line: 40, col: 6): Enum name must start with e_ - MISALIGNED_FUNC_DECL (line: 46, col: 21): Misaligned function declaration - MISALIGNED_FUNC_DECL (line: 47, col: 21): Misaligned function declaration - MISALIGNED_FUNC_DECL (line: 48, col: 21): Misaligned function declaration - MISALIGNED_FUNC_DECL (line: 49, col: 21): Misaligned function declaration - SPACE_REPLACE_TAB (line: 50, col: 2): Found space when expecting tab - MISALIGNED_FUNC_DECL (line: 50, col: 9): Misaligned function declaration - MISALIGNED_FUNC_DECL (line: 51, col: 9): Misaligned function declaration - MISALIGNED_FUNC_DECL (line: 52, col: 17): Misaligned function declaration - MISSING_IDENTIFIER (line: 53, col: 30): missing type qualifier or identifier in function arguments - MISSING_IDENTIFIER (line: 54, col: 26): missing type qualifier or identifier in function arguments - CONSECUTIVE_SPC (line: 57, col: 43): Two or more consecutives spaces - SPC_AFTER_OPERATOR (line: 58, col: 38): missing space after operator - SPC_AFTER_POINTER (line: 60, col: 44): space after pointer - SPC_AFTER_POINTER (line: 61, col: 47): space after pointer - SPC_AFTER_POINTER (line: 61, col: 48): space after pointer - MISALIGNED_FUNC_DECL (line: 62, col: 21): Misaligned function declaration +long_test.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: TOO_MANY_TAB (line: 4, col: 1): Extra tabs for indent level +Error: EMPTY_LINE_FUNCTION (line: 5, col: 1): Empty line in function +Error: SPACE_EMPTY_LINE (line: 5, col: 1): Space on empty line +Error: TOO_FEW_TAB (line: 6, col: 1): Missing tabs for indent level +Error: SPACE_BEFORE_FUNC (line: 10, col: 10): Found space when expecting tab before function name +Error: NO_ARGS_VOID (line: 10, col: 13): Empty function argument requires void +Error: BRACE_NEWLINE (line: 10, col: 15): Expected newline before brace +Error: BRACE_SHOULD_EOL (line: 10, col: 16): Expected newline after brace +Error: TOO_FEW_TAB (line: 10, col: 17): Missing tabs for indent level +Error: TOO_FEW_TAB (line: 10, col: 17): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 10, col: 17): Too many instructions on a single line +Error: RETURN_PARENTHESIS (line: 10, col: 24): Return value must be in parenthesis +Error: TOO_MANY_INSTR (line: 10, col: 27): Too many instructions on a single line +Error: SPACE_BEFORE_FUNC (line: 12, col: 4): Found space when expecting tab before function name +Error: RETURN_PARENTHESIS (line: 14, col: 12): Return value must be in parenthesis +Error: INCLUDE_START_FILE (line: 17, col: 1): Include must be at the start of file +Error: NL_AFTER_PREPROC (line: 18, col: 1): Preprocessor statement must be followed by a newline +Error: NO_ARGS_VOID (line: 18, col: 11): Empty function argument requires void +Error: RETURN_PARENTHESIS (line: 20, col: 12): Return value must be in parenthesis +Error: SPACE_BEFORE_FUNC (line: 23, col: 4): Found space when expecting tab before function name +Error: MISSING_IDENTIFIER (line: 23, col: 16): missing type qualifier or identifier in function arguments +Error: BRACE_NEWLINE (line: 
23, col: 22): Expected newline before brace +Error: RETURN_PARENTHESIS (line: 24, col: 12): Return value must be in parenthesis +Error: TOO_MANY_INSTR (line: 24, col: 17): Too many instructions on a single line +Error: TOO_MANY_FUNCS (line: 26, col: 1): Too many functions in file +Error: MISSING_IDENTIFIER (line: 26, col: 28): missing type qualifier or identifier in function arguments +Error: TOO_MANY_FUNCS (line: 31, col: 1): Too many functions in file +Error: MISSING_IDENTIFIER (line: 31, col: 33): missing type qualifier or identifier in function arguments +Error: BRACE_NEWLINE (line: 31, col: 39): Expected newline before brace +Error: BRACE_SHOULD_EOL (line: 31, col: 40): Expected newline after brace +Error: TOO_FEW_TAB (line: 31, col: 41): Missing tabs for indent level +Error: TOO_FEW_TAB (line: 31, col: 41): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 31, col: 41): Too many instructions on a single line +Error: RETURN_PARENTHESIS (line: 31, col: 48): Return value must be in parenthesis +Error: TOO_MANY_INSTR (line: 31, col: 52): Too many instructions on a single line +Notice: GLOBAL_VAR_DETECTED (line: 33, col: 1): Global variable present in file. Make sure it is a reasonable choice. +Error: GLOBAL_VAR_NAMING (line: 33, col: 7): Global variable must start with g_ +Notice: GLOBAL_VAR_DETECTED (line: 34, col: 1): Global variable present in file. Make sure it is a reasonable choice. +Error: GLOBAL_VAR_NAMING (line: 34, col: 9): Global variable must start with g_ +Error: MISALIGNED_VAR_DECL (line: 34, col: 19): Misaligned variable declaration +Notice: GLOBAL_VAR_DETECTED (line: 38, col: 1): Global variable present in file. Make sure it is a reasonable choice. +Error: SPACE_REPLACE_TAB (line: 38, col: 4): Found space when expecting tab +Error: GLOBAL_VAR_NAMING (line: 38, col: 8): Global variable must start with g_ +Error: NO_SPC_AFR_PAR (line: 38, col: 17): Extra space after parenthesis (brace/bracket) +Error: SPACE_REPLACE_TAB (line: 39, col: 4): Found space when expecting tab +Error: EXP_PARENTHESIS (line: 39, col: 10): Expected parenthesis +Error: FORBIDDEN_ENUM (line: 40, col: 1): Enum declaration are not allowed in .c files +Error: ENUM_TYPE_NAMING (line: 40, col: 6): Enum name must start with e_ +Error: BRACE_NEWLINE (line: 40, col: 10): Expected newline before brace +Error: MISALIGNED_FUNC_DECL (line: 46, col: 21): Misaligned function declaration +Error: MISALIGNED_FUNC_DECL (line: 47, col: 21): Misaligned function declaration +Error: MISALIGNED_FUNC_DECL (line: 48, col: 21): Misaligned function declaration +Error: MISALIGNED_FUNC_DECL (line: 49, col: 21): Misaligned function declaration +Error: SPACE_REPLACE_TAB (line: 50, col: 2): Found space when expecting tab +Error: MISALIGNED_FUNC_DECL (line: 50, col: 9): Misaligned function declaration +Error: MISALIGNED_FUNC_DECL (line: 51, col: 9): Misaligned function declaration +Error: MISALIGNED_FUNC_DECL (line: 52, col: 17): Misaligned function declaration +Error: MISSING_IDENTIFIER (line: 53, col: 30): missing type qualifier or identifier in function arguments +Error: MISSING_IDENTIFIER (line: 54, col: 26): missing type qualifier or identifier in function arguments +Error: CONSECUTIVE_SPC (line: 57, col: 43): Two or more consecutives spaces +Error: SPC_AFTER_OPERATOR (line: 58, col: 38): missing space after operator +Error: SPC_AFTER_POINTER (line: 60, col: 44): space after pointer +Error: SPC_AFTER_POINTER (line: 61, col: 47): space after pointer +Error: SPC_AFTER_POINTER (line: 61, col: 48): space after pointer 
+Error: MISALIGNED_FUNC_DECL (line: 62, col: 21): Misaligned function declaration diff --git a/norminette/tests/rules/ok_comment.c b/tests/rules/samples/ok_comment.c similarity index 100% rename from norminette/tests/rules/ok_comment.c rename to tests/rules/samples/ok_comment.c diff --git a/norminette/tests/rules/ok_comment.out b/tests/rules/samples/ok_comment.out similarity index 88% rename from norminette/tests/rules/ok_comment.out rename to tests/rules/samples/ok_comment.out index 5d5beb9b..f2f6a32d 100644 --- a/norminette/tests/rules/ok_comment.out +++ b/tests/rules/samples/ok_comment.out @@ -1,5 +1,5 @@ ok_comment.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": - + ok_comment.c - IsEmptyLine In "GlobalScope" from "None" line 2": ok_comment.c - IsVarDeclaration In "GlobalScope" from "None" line 3": @@ -30,4 +30,6 @@ ok_comment.c - IsBlockEnd In "Function" from "GlobalScope" line 16": -ok_comment.c: OK! +ok_comment.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Notice: GLOBAL_VAR_DETECTED (line: 3, col: 1): Global variable present in file. Make sure it is a reasonable choice. diff --git a/norminette/tests/rules/ok_const_ptr_1.c b/tests/rules/samples/ok_const_ptr_1.c similarity index 100% rename from norminette/tests/rules/ok_const_ptr_1.c rename to tests/rules/samples/ok_const_ptr_1.c diff --git a/norminette/tests/rules/ok_const_ptr_1.out b/tests/rules/samples/ok_const_ptr_1.out similarity index 84% rename from norminette/tests/rules/ok_const_ptr_1.out rename to tests/rules/samples/ok_const_ptr_1.out index 1a994796..eb11ce58 100644 --- a/norminette/tests/rules/ok_const_ptr_1.out +++ b/tests/rules/samples/ok_const_ptr_1.out @@ -24,4 +24,10 @@ ok_const_ptr_1.c - IsBlockEnd In "Function" from "GlobalScope" line 13": -ok_const_ptr_1.c: OK! +ok_const_ptr_1.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: TOO_MANY_VARS_FUNC (line: 8, col: 1): Too many variables declarations in a function +Error: TOO_MANY_VARS_FUNC (line: 9, col: 1): Too many variables declarations in a function +Error: TOO_MANY_VARS_FUNC (line: 10, col: 1): Too many variables declarations in a function +Error: TOO_MANY_VARS_FUNC (line: 11, col: 1): Too many variables declarations in a function +Error: TOO_MANY_VARS_FUNC (line: 12, col: 1): Too many variables declarations in a function diff --git a/norminette/tests/rules/ok_const_ptr_2.h b/tests/rules/samples/ok_const_ptr_2.h similarity index 100% rename from norminette/tests/rules/ok_const_ptr_2.h rename to tests/rules/samples/ok_const_ptr_2.h diff --git a/norminette/tests/rules/ok_const_ptr_2.out b/tests/rules/samples/ok_const_ptr_2.out similarity index 92% rename from norminette/tests/rules/ok_const_ptr_2.out rename to tests/rules/samples/ok_const_ptr_2.out index f92a4a1f..1b559b79 100644 --- a/norminette/tests/rules/ok_const_ptr_2.out +++ b/tests/rules/samples/ok_const_ptr_2.out @@ -1,7 +1,7 @@ ok_const_ptr_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 1": - + ok_const_ptr_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 2": - + ok_const_ptr_2.h - IsEmptyLine In "GlobalScope" from "None" line 3": ok_const_ptr_2.h - IsFuncPrototype In "GlobalScope" from "None" line 4": @@ -35,5 +35,6 @@ ok_const_ptr_2.h - IsEmptyLine In "GlobalScope" from "None" line 18": ok_const_ptr_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 19": - -ok_const_ptr_2.h: OK! + +ok_const_ptr_2.h: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/ok_cs_cs.c b/tests/rules/samples/ok_cs_cs.c similarity index 100% rename from norminette/tests/rules/ok_cs_cs.c rename to tests/rules/samples/ok_cs_cs.c diff --git a/norminette/tests/rules/ok_cs_cs.out b/tests/rules/samples/ok_cs_cs.out similarity index 89% rename from norminette/tests/rules/ok_cs_cs.out rename to tests/rules/samples/ok_cs_cs.out index 719be308..729a8f1f 100644 --- a/norminette/tests/rules/ok_cs_cs.out +++ b/tests/rules/samples/ok_cs_cs.out @@ -10,4 +10,5 @@ ok_cs_cs.c - IsBlockEnd In "Function" from "GlobalScope" line 6": -ok_cs_cs.c: OK! +ok_cs_cs.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/ok_cs_indent.c b/tests/rules/samples/ok_cs_indent.c similarity index 100% rename from norminette/tests/rules/ok_cs_indent.c rename to tests/rules/samples/ok_cs_indent.c diff --git a/norminette/tests/rules/ok_cs_indent.out b/tests/rules/samples/ok_cs_indent.out similarity index 94% rename from norminette/tests/rules/ok_cs_indent.out rename to tests/rules/samples/ok_cs_indent.out index 71e66d4e..5b0d7d22 100644 --- a/norminette/tests/rules/ok_cs_indent.out +++ b/tests/rules/samples/ok_cs_indent.out @@ -18,4 +18,5 @@ ok_cs_indent.c - IsBlockEnd In "Function" from "GlobalScope" line 10": -ok_cs_indent.c: OK! +ok_cs_indent.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/ok_enum.c b/tests/rules/samples/ok_enum.c similarity index 100% rename from norminette/tests/rules/ok_enum.c rename to tests/rules/samples/ok_enum.c diff --git a/norminette/tests/rules/ok_enum.out b/tests/rules/samples/ok_enum.out similarity index 81% rename from norminette/tests/rules/ok_enum.out rename to tests/rules/samples/ok_enum.out index 158b122e..97345cb6 100644 --- a/norminette/tests/rules/ok_enum.out +++ b/tests/rules/samples/ok_enum.out @@ -10,4 +10,6 @@ ok_enum.c - IsBlockEnd In "UserDefinedEnum" from "GlobalScope" line 6": -ok_enum.c: OK! +ok_enum.c: Error! +Error: FORBIDDEN_TYPEDEF (line: 1, col: 1): Typedef declaration are not allowed in .c files +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/ok_func_classic.c b/tests/rules/samples/ok_func_classic.c similarity index 100% rename from norminette/tests/rules/ok_func_classic.c rename to tests/rules/samples/ok_func_classic.c diff --git a/norminette/tests/rules/ok_func_classic.out b/tests/rules/samples/ok_func_classic.out similarity index 94% rename from norminette/tests/rules/ok_func_classic.out rename to tests/rules/samples/ok_func_classic.out index d29c4704..0e4b1cde 100644 --- a/norminette/tests/rules/ok_func_classic.out +++ b/tests/rules/samples/ok_func_classic.out @@ -6,9 +6,9 @@ ok_func_classic.c - IsComment In "GlobalScope" from "None" line 4": + ** aallo + ** + */> ok_func_classic.c - IsEmptyLine In "GlobalScope" from "None" line 8": ok_func_classic.c - IsFuncDeclaration In "GlobalScope" from "None" line 9": @@ -81,4 +81,7 @@ ok_func_classic.c - IsBlockEnd In "Function" from "GlobalScope" line 50": -ok_func_classic.c: OK! +ok_func_classic.c: Error! +Notice: GLOBAL_VAR_DETECTED (line: 1, col: 1): Global variable present in file. Make sure it is a reasonable choice. 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: CHAR_AS_STRING (line: 48, col: 21): Character constants can have only one character diff --git a/tests/rules/samples/ok_func_name.c b/tests/rules/samples/ok_func_name.c new file mode 100644 index 00000000..305854f5 --- /dev/null +++ b/tests/rules/samples/ok_func_name.c @@ -0,0 +1,21 @@ +int func(void); + +int func2(void) +{ + return (1); +} + +typedef struct s_sTRuct t_Struct; +struct s_sTRuct; + +int g_glOb; + +int Main(void) +{ + char *sTr; + int TAB; + size_T val; + t_Struct val; + + return ; +} diff --git a/tests/rules/samples/ok_func_name.out b/tests/rules/samples/ok_func_name.out new file mode 100644 index 00000000..44c741aa --- /dev/null +++ b/tests/rules/samples/ok_func_name.out @@ -0,0 +1,53 @@ +ok_func_name.c - IsFuncPrototype In "GlobalScope" from "None" line 1": + +ok_func_name.c - IsEmptyLine In "GlobalScope" from "None" line 2": + +ok_func_name.c - IsFuncDeclaration In "GlobalScope" from "None" line 3": + +ok_func_name.c - IsBlockStart In "Function" from "GlobalScope" line 4": + +ok_func_name.c - IsExpressionStatement In "Function" from "GlobalScope" line 5": + +ok_func_name.c - IsBlockEnd In "Function" from "GlobalScope" line 6": + +ok_func_name.c - IsEmptyLine In "GlobalScope" from "None" line 7": + +ok_func_name.c - IsUserDefinedType In "GlobalScope" from "None" line 8": + +ok_func_name.c - IsUserDefinedType In "GlobalScope" from "None" line 9": + +ok_func_name.c - IsEmptyLine In "GlobalScope" from "None" line 10": + +ok_func_name.c - IsVarDeclaration In "GlobalScope" from "None" line 11": + +ok_func_name.c - IsEmptyLine In "GlobalScope" from "None" line 12": + +ok_func_name.c - IsFuncDeclaration In "GlobalScope" from "None" line 13": + +ok_func_name.c - IsBlockStart In "Function" from "GlobalScope" line 14": + +ok_func_name.c - IsVarDeclaration In "Function" from "GlobalScope" line 15": + +ok_func_name.c - IsVarDeclaration In "Function" from "GlobalScope" line 16": + +ok_func_name.c - IsVarDeclaration In "Function" from "GlobalScope" line 17": + +ok_func_name.c - IsVarDeclaration In "Function" from "GlobalScope" line 18": + +ok_func_name.c - IsEmptyLine In "Function" from "GlobalScope" line 19": + +ok_func_name.c - IsExpressionStatement In "Function" from "GlobalScope" line 20": + +ok_func_name.c - IsBlockEnd In "Function" from "GlobalScope" line 21": + +ok_func_name.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: FORBIDDEN_TYPEDEF (line: 8, col: 1): Typedef declaration are not allowed in .c files +Error: FORBIDDEN_CHAR_NAME (line: 8, col: 25): user defined identifiers should contain only lowercase characters, digits or '_' +Error: FORBIDDEN_STRUCT (line: 9, col: 1): Struct declaration are not allowed in .c files +Error: FORBIDDEN_CHAR_NAME (line: 9, col: 25): user defined identifiers should contain only lowercase characters, digits or '_' +Notice: GLOBAL_VAR_DETECTED (line: 11, col: 1): Global variable present in file. Make sure it is a reasonable choice. 
+Error: FORBIDDEN_CHAR_NAME (line: 11, col: 25): user defined identifiers should contain only lowercase characters, digits or '_' +Error: FORBIDDEN_CHAR_NAME (line: 13, col: 5): user defined identifiers should contain only lowercase characters, digits or '_' +Error: FORBIDDEN_CHAR_NAME (line: 15, col: 18): user defined identifiers should contain only lowercase characters, digits or '_' +Error: FORBIDDEN_CHAR_NAME (line: 16, col: 17): user defined identifiers should contain only lowercase characters, digits or '_' diff --git a/norminette/tests/rules/ok_func_ptr.c b/tests/rules/samples/ok_func_ptr.c similarity index 100% rename from norminette/tests/rules/ok_func_ptr.c rename to tests/rules/samples/ok_func_ptr.c diff --git a/norminette/tests/rules/ok_func_ptr.out b/tests/rules/samples/ok_func_ptr.out similarity index 84% rename from norminette/tests/rules/ok_func_ptr.out rename to tests/rules/samples/ok_func_ptr.out index 5975a54b..57ac21e4 100644 --- a/norminette/tests/rules/ok_func_ptr.out +++ b/tests/rules/samples/ok_func_ptr.out @@ -22,4 +22,8 @@ ok_func_ptr.c - IsBlockEnd In "Function" from "GlobalScope" line 11": -ok_func_ptr.c: OK! +ok_func_ptr.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Notice: GLOBAL_VAR_DETECTED (line: 2, col: 1): Global variable present in file. Make sure it is a reasonable choice. +Error: FORBIDDEN_TYPEDEF (line: 5, col: 1): Typedef declaration are not allowed in .c files +Error: FORBIDDEN_TYPEDEF (line: 6, col: 1): Typedef declaration are not allowed in .c files diff --git a/norminette/tests/rules/ok_function_pointer.c b/tests/rules/samples/ok_function_pointer.c similarity index 100% rename from norminette/tests/rules/ok_function_pointer.c rename to tests/rules/samples/ok_function_pointer.c diff --git a/norminette/tests/rules/ok_function_pointer.out b/tests/rules/samples/ok_function_pointer.out similarity index 88% rename from norminette/tests/rules/ok_function_pointer.out rename to tests/rules/samples/ok_function_pointer.out index e42a57cc..e039681b 100644 --- a/norminette/tests/rules/ok_function_pointer.out +++ b/tests/rules/samples/ok_function_pointer.out @@ -4,4 +4,5 @@ ok_function_pointer.c - IsAssignation In "GlobalScope" from "None" line 3": -ok_function_pointer.c: OK! +ok_function_pointer.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/tests/rules/samples/ok_if_defined.c b/tests/rules/samples/ok_if_defined.c new file mode 100644 index 00000000..ba990f91 --- /dev/null +++ b/tests/rules/samples/ok_if_defined.c @@ -0,0 +1,11 @@ +#if defined FOO +# undef FOO +#elif defined (BAR) +# undef BAR +#elif defined(FOOBAR) +# undef FOOBAR +#endif /* if defined FOO */ + +#define A defined B + +void defined(void); diff --git a/tests/rules/samples/ok_if_defined.out b/tests/rules/samples/ok_if_defined.out new file mode 100644 index 00000000..25a52d9b --- /dev/null +++ b/tests/rules/samples/ok_if_defined.out @@ -0,0 +1,25 @@ +ok_if_defined.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": + +ok_if_defined.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": + +ok_if_defined.c - IsPreprocessorStatement In "GlobalScope" from "None" line 3": + +ok_if_defined.c - IsPreprocessorStatement In "GlobalScope" from "None" line 4": + +ok_if_defined.c - IsPreprocessorStatement In "GlobalScope" from "None" line 5": + +ok_if_defined.c - IsPreprocessorStatement In "GlobalScope" from "None" line 6": + +ok_if_defined.c - IsPreprocessorStatement In "GlobalScope" from "None" line 7": + +ok_if_defined.c - IsEmptyLine In "GlobalScope" from "None" line 8": + +ok_if_defined.c - IsPreprocessorStatement In "GlobalScope" from "None" line 9": + +ok_if_defined.c - IsEmptyLine In "GlobalScope" from "None" line 10": + +ok_if_defined.c - IsFuncPrototype In "GlobalScope" from "None" line 11": + +ok_if_defined.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: PREPROC_CONSTANT (line: 9, col: 19): Preprocessor statement must only contain constant defines diff --git a/norminette/tests/rules/ok_ifdef.c b/tests/rules/samples/ok_ifdef.c similarity index 100% rename from norminette/tests/rules/ok_ifdef.c rename to tests/rules/samples/ok_ifdef.c diff --git a/norminette/tests/rules/ok_ifdef.out b/tests/rules/samples/ok_ifdef.out similarity index 58% rename from norminette/tests/rules/ok_ifdef.out rename to tests/rules/samples/ok_ifdef.out index bd3a7f26..3775bce2 100644 --- a/norminette/tests/rules/ok_ifdef.out +++ b/tests/rules/samples/ok_ifdef.out @@ -1,17 +1,21 @@ ok_ifdef.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": - + ok_ifdef.c - IsFuncDeclaration In "GlobalScope" from "None" line 2": ok_ifdef.c - IsPreprocessorStatement In "GlobalScope" from "None" line 3": - <#ELSE=#else> + ok_ifdef.c - IsFuncDeclaration In "GlobalScope" from "None" line 4": -ok_ifdef.c - IsPreprocessorStatement In "Function" from "GlobalScope" line 5": - -ok_ifdef.c - IsBlockStart In "Function" from "GlobalScope" line 6": +ok_ifdef.c - IsPreprocessorStatement In "GlobalScope" from "None" line 5": + +ok_ifdef.c - IsBlockStart In "GlobalScope" from "None" line 6": ok_ifdef.c - IsExpressionStatement In "Function" from "GlobalScope" line 7": ok_ifdef.c - IsBlockEnd In "Function" from "GlobalScope" line 8": -ok_ifdef.c: OK! +ok_ifdef.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: NL_AFTER_PREPROC (line: 2, col: 1): Preprocessor statement must be followed by a newline +Error: NL_AFTER_PREPROC (line: 4, col: 1): Preprocessor statement must be followed by a newline +Error: NL_AFTER_PREPROC (line: 6, col: 1): Preprocessor statement must be followed by a newline diff --git a/norminette/tests/rules/ok_include.c b/tests/rules/samples/ok_include.c similarity index 100% rename from norminette/tests/rules/ok_include.c rename to tests/rules/samples/ok_include.c diff --git a/tests/rules/samples/ok_include.out b/tests/rules/samples/ok_include.out new file mode 100644 index 00000000..822a23a0 --- /dev/null +++ b/tests/rules/samples/ok_include.out @@ -0,0 +1,10 @@ +ok_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": + +ok_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": + +ok_include.c - IsEmptyLine In "GlobalScope" from "None" line 3": + +ok_include.c - IsPreprocessorStatement In "GlobalScope" from "None" line 4": + +ok_include.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/ok_pointer.c b/tests/rules/samples/ok_pointer.c similarity index 100% rename from norminette/tests/rules/ok_pointer.c rename to tests/rules/samples/ok_pointer.c diff --git a/norminette/tests/rules/ok_pointer.out b/tests/rules/samples/ok_pointer.out similarity index 92% rename from norminette/tests/rules/ok_pointer.out rename to tests/rules/samples/ok_pointer.out index 63092008..171d06f0 100644 --- a/norminette/tests/rules/ok_pointer.out +++ b/tests/rules/samples/ok_pointer.out @@ -12,4 +12,5 @@ ok_pointer.c - IsBlockEnd In "Function" from "GlobalScope" line 7": -ok_pointer.c: OK! +ok_pointer.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/ok_preproc_define.c b/tests/rules/samples/ok_preproc_define.c similarity index 100% rename from norminette/tests/rules/ok_preproc_define.c rename to tests/rules/samples/ok_preproc_define.c diff --git a/tests/rules/samples/ok_preproc_define.out b/tests/rules/samples/ok_preproc_define.out new file mode 100644 index 00000000..183db7cb --- /dev/null +++ b/tests/rules/samples/ok_preproc_define.out @@ -0,0 +1,8 @@ +ok_preproc_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": + +ok_preproc_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": + +ok_preproc_define.c - IsPreprocessorStatement In "GlobalScope" from "None" line 3": + +ok_preproc_define.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/ok_preproc_indent.c b/tests/rules/samples/ok_preproc_indent.c similarity index 100% rename from norminette/tests/rules/ok_preproc_indent.c rename to tests/rules/samples/ok_preproc_indent.c diff --git a/norminette/tests/rules/ok_preproc_indent.out b/tests/rules/samples/ok_preproc_indent.out similarity index 55% rename from norminette/tests/rules/ok_preproc_indent.out rename to tests/rules/samples/ok_preproc_indent.out index 964defbb..0ef044bc 100644 --- a/norminette/tests/rules/ok_preproc_indent.out +++ b/tests/rules/samples/ok_preproc_indent.out @@ -1,15 +1,16 @@ ok_preproc_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": - + ok_preproc_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": - + ok_preproc_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 3": - + ok_preproc_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 4": - + ok_preproc_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 5": - + ok_preproc_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 6": - + ok_preproc_indent.c - IsPreprocessorStatement In "GlobalScope" from "None" line 7": - -ok_preproc_indent.c: OK! + +ok_preproc_indent.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/ok_protection.h b/tests/rules/samples/ok_protection.h similarity index 99% rename from norminette/tests/rules/ok_protection.h rename to tests/rules/samples/ok_protection.h index cfaa3aad..95a86b63 100644 --- a/norminette/tests/rules/ok_protection.h +++ b/tests/rules/samples/ok_protection.h @@ -1,6 +1,7 @@ #ifndef OK_PROTECTION_H # define OK_PROTECTION_H # define TOTO "tata" + void main(void); int g_toto; diff --git a/norminette/tests/rules/ok_protection.out b/tests/rules/samples/ok_protection.out similarity index 53% rename from norminette/tests/rules/ok_protection.out rename to tests/rules/samples/ok_protection.out index e31dcadd..00a51da8 100644 --- a/norminette/tests/rules/ok_protection.out +++ b/tests/rules/samples/ok_protection.out @@ -1,17 +1,21 @@ ok_protection.h - IsPreprocessorStatement In "GlobalScope" from "None" line 1": - + ok_protection.h - IsPreprocessorStatement In "GlobalScope" from "None" line 2": - + ok_protection.h - IsPreprocessorStatement In "GlobalScope" from "None" line 3": - -ok_protection.h - IsFuncPrototype In "GlobalScope" from "None" line 4": + +ok_protection.h - IsEmptyLine In "GlobalScope" from "None" line 4": + +ok_protection.h - IsFuncPrototype In "GlobalScope" from "None" line 5": -ok_protection.h - IsVarDeclaration In "GlobalScope" from "None" line 5": +ok_protection.h - IsVarDeclaration In "GlobalScope" from "None" line 6": -ok_protection.h - IsEmptyLine In "GlobalScope" from "None" line 6": +ok_protection.h - IsEmptyLine In "GlobalScope" from "None" line 7": -ok_protection.h - IsAssignation In "GlobalScope" from "None" line 7": +ok_protection.h - IsAssignation In "GlobalScope" from "None" line 8": -ok_protection.h - IsPreprocessorStatement In "GlobalScope" from "None" line 8": - -ok_protection.h: OK! +ok_protection.h - IsPreprocessorStatement In "GlobalScope" from "None" line 9": + +ok_protection.h: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Notice: GLOBAL_VAR_DETECTED (line: 6, col: 1): Global variable present in file. Make sure it is a reasonable choice. 
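The renamed and regenerated fixtures in this patch all follow one pattern: each sample under tests/rules/samples/ is paired with a .out file whose tail records the Error:/Notice: lines and the final "<file>: OK!/Error!" verdict the checker is expected to produce. A minimal illustrative sketch of checking such a pair is given below; it assumes the installed norminette CLI prints that same summary for a sample, it ignores the leading parser-trace lines ("... - IsFuncDeclaration In ... line N") that the fixtures also contain (those come from a debug mode), and the helper names and comparison strategy are illustrative rather than the repository's actual test harness.

import subprocess
from pathlib import Path

# Sketch only: assumes `norminette <file>` prints the same Error:/Notice:
# summary recorded at the end of each .out fixture.
SUMMARY_PREFIXES = ("Error:", "Notice:")

def summary_lines(text: str) -> list[str]:
    # Keep only the verdict line and the Error:/Notice: lines, i.e. the part of
    # a .out fixture this sketch assumes plain CLI output shares.
    return [
        line for line in text.splitlines()
        if line.startswith(SUMMARY_PREFIXES) or line.endswith(("OK!", "Error!"))
    ]

def check_sample(sample: Path) -> None:
    expected = summary_lines(sample.with_suffix(".out").read_text())
    # Run from the sample's directory so the reported file name matches the fixture.
    proc = subprocess.run(
        ["norminette", sample.name],
        cwd=sample.parent, capture_output=True, text=True,
    )
    actual = summary_lines(proc.stdout)
    assert actual == expected, f"{sample.name}: checker output differs from fixture"

if __name__ == "__main__":
    for sample in sorted(Path("tests/rules/samples").glob("*.[ch]")):
        check_sample(sample)
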
diff --git a/norminette/tests/rules/ok_struct_data.c b/tests/rules/samples/ok_struct_data.c similarity index 100% rename from norminette/tests/rules/ok_struct_data.c rename to tests/rules/samples/ok_struct_data.c diff --git a/norminette/tests/rules/ok_struct_data.out b/tests/rules/samples/ok_struct_data.out similarity index 95% rename from norminette/tests/rules/ok_struct_data.out rename to tests/rules/samples/ok_struct_data.out index 7b5b45b2..b7ab7a83 100644 --- a/norminette/tests/rules/ok_struct_data.out +++ b/tests/rules/samples/ok_struct_data.out @@ -20,4 +20,5 @@ ok_struct_data.c - IsBlockEnd In "Function" from "GlobalScope" line 11": -ok_struct_data.c: OK! +ok_struct_data.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/ok_struct_name.c b/tests/rules/samples/ok_struct_name.c similarity index 100% rename from norminette/tests/rules/ok_struct_name.c rename to tests/rules/samples/ok_struct_name.c diff --git a/norminette/tests/rules/ok_struct_name.out b/tests/rules/samples/ok_struct_name.out similarity index 83% rename from norminette/tests/rules/ok_struct_name.out rename to tests/rules/samples/ok_struct_name.out index e1ad1cdd..29d8e4c9 100644 --- a/norminette/tests/rules/ok_struct_name.out +++ b/tests/rules/samples/ok_struct_name.out @@ -34,4 +34,9 @@ ok_struct_name.c - IsBlockEnd In "Function" from "GlobalScope" line 18": -ok_struct_name.c: OK! +ok_struct_name.c: Error! +Error: FORBIDDEN_TYPEDEF (line: 1, col: 1): Typedef declaration are not allowed in .c files +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: FORBIDDEN_STRUCT (line: 5, col: 1): Struct declaration are not allowed in .c files +Notice: GLOBAL_VAR_DETECTED (line: 11, col: 1): Global variable present in file. Make sure it is a reasonable choice. +Notice: GLOBAL_VAR_DETECTED (line: 12, col: 1): Global variable present in file. Make sure it is a reasonable choice. diff --git a/norminette/tests/rules/ok_typedef.c b/tests/rules/samples/ok_typedef.c similarity index 100% rename from norminette/tests/rules/ok_typedef.c rename to tests/rules/samples/ok_typedef.c diff --git a/norminette/tests/rules/ok_typedef.out b/tests/rules/samples/ok_typedef.out similarity index 85% rename from norminette/tests/rules/ok_typedef.out rename to tests/rules/samples/ok_typedef.out index 4426819b..fa8dc436 100644 --- a/norminette/tests/rules/ok_typedef.out +++ b/tests/rules/samples/ok_typedef.out @@ -32,4 +32,8 @@ ok_typedef.c - IsBlockEnd In "Function" from "GlobalScope" line 17": -ok_typedef.c: OK! +ok_typedef.c: Error! 
+Error: FORBIDDEN_TYPEDEF (line: 1, col: 1): Typedef declaration are not allowed in .c files +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: FORBIDDEN_TYPEDEF (line: 7, col: 1): Typedef declaration are not allowed in .c files +Error: FORBIDDEN_STRUCT (line: 9, col: 1): Struct declaration are not allowed in .c files diff --git a/norminette/tests/rules/ok_var_decl.c b/tests/rules/samples/ok_var_decl.c similarity index 100% rename from norminette/tests/rules/ok_var_decl.c rename to tests/rules/samples/ok_var_decl.c diff --git a/norminette/tests/rules/ok_var_decl.out b/tests/rules/samples/ok_var_decl.out similarity index 96% rename from norminette/tests/rules/ok_var_decl.out rename to tests/rules/samples/ok_var_decl.out index 8cc2ad29..c8cbcc92 100644 --- a/norminette/tests/rules/ok_var_decl.out +++ b/tests/rules/samples/ok_var_decl.out @@ -40,4 +40,5 @@ ok_var_decl.c - IsBlockEnd In "Function" from "GlobalScope" line 21": -ok_var_decl.c: OK! +ok_var_decl.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/ok_var_name.c b/tests/rules/samples/ok_var_name.c similarity index 100% rename from norminette/tests/rules/ok_var_name.c rename to tests/rules/samples/ok_var_name.c diff --git a/norminette/tests/rules/ok_var_name.out b/tests/rules/samples/ok_var_name.out similarity index 98% rename from norminette/tests/rules/ok_var_name.out rename to tests/rules/samples/ok_var_name.out index e088ce0f..204b0754 100644 --- a/norminette/tests/rules/ok_var_name.out +++ b/tests/rules/samples/ok_var_name.out @@ -86,4 +86,5 @@ ok_var_name.c - IsBlockEnd In "Function" from "GlobalScope" line 44": -ok_var_name.c: OK! +ok_var_name.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/random_test.c b/tests/rules/samples/random_test.c similarity index 100% rename from norminette/tests/rules/random_test.c rename to tests/rules/samples/random_test.c diff --git a/norminette/tests/rules/random_test.out b/tests/rules/samples/random_test.out similarity index 100% rename from norminette/tests/rules/random_test.out rename to tests/rules/samples/random_test.out diff --git a/tests/rules/samples/test_comment_escaping_with_backslash.c b/tests/rules/samples/test_comment_escaping_with_backslash.c new file mode 100644 index 00000000..e3a7b0b3 --- /dev/null +++ b/tests/rules/samples/test_comment_escaping_with_backslash.c @@ -0,0 +1,11 @@ +/* *\ +/ + +#include + +int main() { + for (int i=0; i<2;++i) + printf("%d\n", i); +} + +/**/ \ No newline at end of file diff --git a/tests/rules/samples/test_comment_escaping_with_backslash.out b/tests/rules/samples/test_comment_escaping_with_backslash.out new file mode 100644 index 00000000..4bbbcfa9 --- /dev/null +++ b/tests/rules/samples/test_comment_escaping_with_backslash.out @@ -0,0 +1,37 @@ +test_comment_escaping_with_backslash.c - IsComment In "GlobalScope" from "None" line 1": + +test_comment_escaping_with_backslash.c - IsEmptyLine In "GlobalScope" from "None" line 3": + +test_comment_escaping_with_backslash.c - IsPreprocessorStatement In "GlobalScope" from "None" line 4": + +test_comment_escaping_with_backslash.c - IsEmptyLine In "GlobalScope" from "None" line 5": + +test_comment_escaping_with_backslash.c - IsFuncDeclaration In "GlobalScope" from "None" line 6": + +test_comment_escaping_with_backslash.c - IsBlockStart In "Function" from "GlobalScope" line 6": + +test_comment_escaping_with_backslash.c - IsControlStatement In 
"Function" from "GlobalScope" line 7": + +test_comment_escaping_with_backslash.c - IsFunctionCall In "ControlStructure" from "Function" line 8": + +test_comment_escaping_with_backslash.c - IsBlockEnd In "Function" from "GlobalScope" line 9": + +test_comment_escaping_with_backslash.c - IsEmptyLine In "GlobalScope" from "None" line 10": + +test_comment_escaping_with_backslash.c - IsComment In "GlobalScope" from "None" line 11": + +test_comment_escaping_with_backslash.c: Error! +Error: INVALID_HEADER (line: 3, col: 1): Missing or invalid 42 header +Error: SPACE_BEFORE_FUNC (line: 6, col: 4): Found space when expecting tab before function name +Error: NO_ARGS_VOID (line: 6, col: 10): Empty function argument requires void +Error: BRACE_NEWLINE (line: 6, col: 12): Expected newline before brace +Error: TOO_FEW_TAB (line: 7, col: 1): Missing tabs for indent level +Error: FORBIDDEN_CS (line: 7, col: 3): Forbidden control structure +Error: SPACE_REPLACE_TAB (line: 7, col: 3): Found space when expecting tab +Error: SPC_AFTER_OPERATOR (line: 7, col: 13): missing space after operator +Error: SPC_BFR_OPERATOR (line: 7, col: 13): missing space before operator +Error: SPC_AFTER_OPERATOR (line: 7, col: 18): missing space after operator +Error: SPC_BFR_OPERATOR (line: 7, col: 18): missing space before operator +Error: SPC_AFTER_OPERATOR (line: 7, col: 20): missing space after operator +Error: TOO_FEW_TAB (line: 8, col: 1): Missing tabs for indent level +Error: SPACE_REPLACE_TAB (line: 8, col: 5): Found space when expecting tab diff --git a/tests/rules/samples/test_comment_escaping_with_trigraph.c b/tests/rules/samples/test_comment_escaping_with_trigraph.c new file mode 100644 index 00000000..eba725d5 --- /dev/null +++ b/tests/rules/samples/test_comment_escaping_with_trigraph.c @@ -0,0 +1,11 @@ +/* *??/ +/ + +#include + +int main() { + for (int i=0; i<2;++i) + printf("%d\n", i); +} + +/**/ \ No newline at end of file diff --git a/tests/rules/samples/test_comment_escaping_with_trigraph.out b/tests/rules/samples/test_comment_escaping_with_trigraph.out new file mode 100644 index 00000000..2cf49b62 --- /dev/null +++ b/tests/rules/samples/test_comment_escaping_with_trigraph.out @@ -0,0 +1,37 @@ +test_comment_escaping_with_trigraph.c - IsComment In "GlobalScope" from "None" line 1": + +test_comment_escaping_with_trigraph.c - IsEmptyLine In "GlobalScope" from "None" line 3": + +test_comment_escaping_with_trigraph.c - IsPreprocessorStatement In "GlobalScope" from "None" line 4": + +test_comment_escaping_with_trigraph.c - IsEmptyLine In "GlobalScope" from "None" line 5": + +test_comment_escaping_with_trigraph.c - IsFuncDeclaration In "GlobalScope" from "None" line 6": + +test_comment_escaping_with_trigraph.c - IsBlockStart In "Function" from "GlobalScope" line 6": + +test_comment_escaping_with_trigraph.c - IsControlStatement In "Function" from "GlobalScope" line 7": + +test_comment_escaping_with_trigraph.c - IsFunctionCall In "ControlStructure" from "Function" line 8": + +test_comment_escaping_with_trigraph.c - IsBlockEnd In "Function" from "GlobalScope" line 9": + +test_comment_escaping_with_trigraph.c - IsEmptyLine In "GlobalScope" from "None" line 10": + +test_comment_escaping_with_trigraph.c - IsComment In "GlobalScope" from "None" line 11": + +test_comment_escaping_with_trigraph.c: Error! 
+Error: INVALID_HEADER (line: 3, col: 1): Missing or invalid 42 header +Error: SPACE_BEFORE_FUNC (line: 6, col: 4): Found space when expecting tab before function name +Error: NO_ARGS_VOID (line: 6, col: 10): Empty function argument requires void +Error: BRACE_NEWLINE (line: 6, col: 12): Expected newline before brace +Error: TOO_FEW_TAB (line: 7, col: 1): Missing tabs for indent level +Error: FORBIDDEN_CS (line: 7, col: 3): Forbidden control structure +Error: SPACE_REPLACE_TAB (line: 7, col: 3): Found space when expecting tab +Error: SPC_AFTER_OPERATOR (line: 7, col: 13): missing space after operator +Error: SPC_BFR_OPERATOR (line: 7, col: 13): missing space before operator +Error: SPC_AFTER_OPERATOR (line: 7, col: 18): missing space after operator +Error: SPC_BFR_OPERATOR (line: 7, col: 18): missing space before operator +Error: SPC_AFTER_OPERATOR (line: 7, col: 20): missing space after operator +Error: TOO_FEW_TAB (line: 8, col: 1): Missing tabs for indent level +Error: SPACE_REPLACE_TAB (line: 8, col: 5): Found space when expecting tab diff --git a/tests/rules/samples/test_comment_line_len.c b/tests/rules/samples/test_comment_line_len.c new file mode 100644 index 00000000..c27c8984 --- /dev/null +++ b/tests/rules/samples/test_comment_line_len.c @@ -0,0 +1,10 @@ +/** + * + * + * THIS IS A BIG COMMENTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT + * THIS IS A BIG COMMENTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT + */ + +int main(void){ + return(0); +} diff --git a/tests/rules/samples/test_comment_line_len.out b/tests/rules/samples/test_comment_line_len.out new file mode 100644 index 00000000..6c9a3e38 --- /dev/null +++ b/tests/rules/samples/test_comment_line_len.out @@ -0,0 +1,27 @@ +test_comment_line_len.c - IsComment In "GlobalScope" from "None" line 5": + +test_comment_line_len.c - IsEmptyLine In "GlobalScope" from "None" line 7": + +test_comment_line_len.c - IsFuncDeclaration In "GlobalScope" from "None" line 8": + +test_comment_line_len.c - IsBlockStart In "Function" from "GlobalScope" line 8": + +test_comment_line_len.c - IsExpressionStatement In "Function" from "GlobalScope" line 9": + +test_comment_line_len.c - IsBlockEnd In "Function" from "GlobalScope" line 10": + +test_comment_line_len.c: Error! +Error: LINE_TOO_LONG (line: 4, col: 1): line too long +Error: LINE_TOO_LONG (line: 5, col: 1): line too long +Error: INVALID_HEADER (line: 7, col: 1): Missing or invalid 42 header +Error: SPACE_BEFORE_FUNC (line: 8, col: 4): Found space when expecting tab before function name +Error: BRACE_NEWLINE (line: 8, col: 15): Expected newline before brace +Error: TOO_FEW_TAB (line: 9, col: 1): Missing tabs for indent level +Error: SPACE_AFTER_KW (line: 9, col: 9): Missing space after keyword +Error: SPACE_REPLACE_TAB (line: 9, col: 9): Found space when expecting tab +Error: SPC_BFR_PAR (line: 9, col: 15): Missing space before parenthesis (brace/bracket) diff --git a/tests/rules/samples/test_comments.c b/tests/rules/samples/test_comments.c new file mode 100644 index 00000000..78b59007 --- /dev/null +++ b/tests/rules/samples/test_comments.c @@ -0,0 +1,21 @@ +struct { + // points is to something + int points; // is an int :D +}; + +typedef /* oopss */ bool bool; + +enum test { + // blaboe + hello, // it works + /* error*/ error +}; + +void hello(/* nothing */ void) // error because comment is in middle of the line +{ + // error because scope is from a function + { + // are you trying to cheat? 
+ // error because scope is from a function + } +} diff --git a/tests/rules/samples/test_comments.out b/tests/rules/samples/test_comments.out new file mode 100644 index 00000000..82e7d82f --- /dev/null +++ b/tests/rules/samples/test_comments.out @@ -0,0 +1,78 @@ +test_comments.c - IsUserDefinedType In "GlobalScope" from "None" line 1": + +test_comments.c - IsComment In "UserDefinedType" from "GlobalScope" line 2": + +test_comments.c - IsVarDeclaration In "UserDefinedType" from "GlobalScope" line 3": + +test_comments.c - IsComment In "UserDefinedType" from "GlobalScope" line 3": + +test_comments.c - IsBlockEnd In "UserDefinedType" from "GlobalScope" line 4": + +test_comments.c - IsEmptyLine In "GlobalScope" from "None" line 5": + +test_comments.c - IsUserDefinedType In "GlobalScope" from "None" line 6": + +test_comments.c - IsEmptyLine In "GlobalScope" from "None" line 7": + +test_comments.c - IsUserDefinedType In "GlobalScope" from "None" line 8": + +test_comments.c - IsComment In "UserDefinedEnum" from "GlobalScope" line 9": + +test_comments.c - IsEnumVarDecl In "UserDefinedEnum" from "GlobalScope" line 10": + +test_comments.c - IsComment In "UserDefinedEnum" from "GlobalScope" line 10": + +test_comments.c - IsComment In "UserDefinedEnum" from "GlobalScope" line 11": + +test_comments.c - IsEnumVarDecl In "UserDefinedEnum" from "GlobalScope" line 11": + +test_comments.c - IsBlockEnd In "UserDefinedEnum" from "GlobalScope" line 12": + +test_comments.c - IsEmptyLine In "GlobalScope" from "None" line 13": + +test_comments.c - IsFuncDeclaration In "GlobalScope" from "None" line 14": + +test_comments.c - IsBlockStart In "Function" from "GlobalScope" line 15": + +test_comments.c - IsComment In "Function" from "GlobalScope" line 16": + +test_comments.c - IsBlockStart In "Function" from "GlobalScope" line 17": + +test_comments.c - IsComment In "ControlStructure" from "Function" line 18": + +test_comments.c - IsComment In "ControlStructure" from "Function" line 19": + +test_comments.c - IsBlockEnd In "ControlStructure" from "Function" line 20": + +test_comments.c - IsBlockEnd In "Function" from "GlobalScope" line 21": + +test_comments.c: Error! 
+Error: FORBIDDEN_STRUCT (line: 1, col: 1): Struct declaration are not allowed in .c files +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: BRACE_NEWLINE (line: 1, col: 8): Expected newline before brace +Error: FORBIDDEN_TYPEDEF (line: 6, col: 1): Typedef declaration are not allowed in .c files +Error: COMMENT_ON_INSTR (line: 6, col: 9): Comment must be on its own line or at end of a line +Error: SPACE_REPLACE_TAB (line: 6, col: 25): Found space when expecting tab +Error: USER_DEFINED_TYPEDEF (line: 6, col: 26): User defined typedef must start with t_ +Error: FORBIDDEN_ENUM (line: 8, col: 1): Enum declaration are not allowed in .c files +Error: ENUM_TYPE_NAMING (line: 8, col: 6): Enum name must start with e_ +Error: BRACE_NEWLINE (line: 8, col: 11): Expected newline before brace +Error: SPACE_EMPTY_LINE (line: 9, col: 5): Space on empty line +Error: TOO_FEW_TAB (line: 10, col: 1): Missing tabs for indent level +Error: SPACE_REPLACE_TAB (line: 10, col: 5): Found space when expecting tab +Error: CONSECUTIVE_SPC (line: 10, col: 11): Two or more consecutives spaces +Error: SPACE_REPLACE_TAB (line: 11, col: 5): Found space when expecting tab +Error: TOO_FEW_TAB (line: 11, col: 16): Missing tabs for indent level +Error: SPACE_BEFORE_FUNC (line: 14, col: 5): Found space when expecting tab before function name +Error: COMMENT_ON_INSTR (line: 14, col: 12): Comment must be on its own line or at end of a line +Error: MISSING_IDENTIFIER (line: 14, col: 25): missing type qualifier or identifier in function arguments +Error: SPACE_EMPTY_LINE (line: 16, col: 4): Space on empty line +Error: WRONG_SCOPE_COMMENT (line: 16, col: 4): Comment is invalid in this scope +Error: TOO_FEW_TAB (line: 17, col: 1): Missing tabs for indent level +Error: SPACE_EMPTY_LINE (line: 17, col: 4): Space on empty line +Error: SPACE_EMPTY_LINE (line: 18, col: 7): Space on empty line +Error: WRONG_SCOPE_COMMENT (line: 18, col: 7): Comment is invalid in this scope +Error: SPACE_EMPTY_LINE (line: 19, col: 7): Space on empty line +Error: WRONG_SCOPE_COMMENT (line: 19, col: 7): Comment is invalid in this scope +Error: TOO_FEW_TAB (line: 20, col: 1): Missing tabs for indent level +Error: SPACE_EMPTY_LINE (line: 20, col: 4): Space on empty line diff --git a/norminette/tests/rules/test_file_0907.c b/tests/rules/samples/test_file_0907.c similarity index 100% rename from norminette/tests/rules/test_file_0907.c rename to tests/rules/samples/test_file_0907.c diff --git a/norminette/tests/rules/test_file_0907.out b/tests/rules/samples/test_file_0907.out similarity index 61% rename from norminette/tests/rules/test_file_0907.out rename to tests/rules/samples/test_file_0907.out index 09daa8c0..3902f7dc 100644 --- a/norminette/tests/rules/test_file_0907.out +++ b/tests/rules/samples/test_file_0907.out @@ -1,4 +1,5 @@ test_file_0907.c - IsFuncPrototype In "GlobalScope" from "None" line 1": -test_file_0907.c: KO! - ARG_TYPE_UKN (line: 1, col: 1): Unrecognized variable type +test_file_0907.c: Error! 
+Error: ARG_TYPE_UKN (line: 1, col: 1): Unrecognized variable type +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/test_file_0907_2.h b/tests/rules/samples/test_file_0907_2.h similarity index 100% rename from norminette/tests/rules/test_file_0907_2.h rename to tests/rules/samples/test_file_0907_2.h diff --git a/norminette/tests/rules/test_file_0907_2.out b/tests/rules/samples/test_file_0907_2.out similarity index 59% rename from norminette/tests/rules/test_file_0907_2.out rename to tests/rules/samples/test_file_0907_2.out index 3f4c5a03..852497b7 100644 --- a/norminette/tests/rules/test_file_0907_2.out +++ b/tests/rules/samples/test_file_0907_2.out @@ -1,97 +1,97 @@ test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 1": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 2": - + test_file_0907_2.h - IsEmptyLine In "GlobalScope" from "None" line 3": test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 4": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 5": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 6": - + test_file_0907_2.h - IsEmptyLine In "GlobalScope" from "None" line 7": test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 8": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 9": - + test_file_0907_2.h - IsEmptyLine In "GlobalScope" from "None" line 10": test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 11": - + test_file_0907_2.h - IsEmptyLine In "GlobalScope" from "None" line 12": test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 13": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 14": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 15": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 16": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 17": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 18": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 19": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 20": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 21": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 22": - + test_file_0907_2.h - IsEmptyLine In "GlobalScope" from "None" line 23": test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 24": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 25": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 26": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 27": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 28": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 29": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 30": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 31": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 32": - + test_file_0907_2.h - IsEmptyLine In "GlobalScope" from "None" line 33": test_file_0907_2.h - 
IsPreprocessorStatement In "GlobalScope" from "None" line 34": - + test_file_0907_2.h - IsEmptyLine In "GlobalScope" from "None" line 35": test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 36": - + test_file_0907_2.h - IsEmptyLine In "GlobalScope" from "None" line 37": test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 38": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 39": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 40": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 41": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 42": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 43": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 44": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 45": - + test_file_0907_2.h - IsEmptyLine In "GlobalScope" from "None" line 46": test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 47": - + test_file_0907_2.h - IsEmptyLine In "GlobalScope" from "None" line 48": test_file_0907_2.h - IsComment In "GlobalScope" from "None" line 49": @@ -139,95 +139,94 @@ test_file_0907_2.h - IsEmptyLine In "GlobalScope" from "None" line 73": test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 74": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 75": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 76": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 77": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 78": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 79": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 80": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 81": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 82": - + test_file_0907_2.h - IsEmptyLine In "GlobalScope" from "None" line 83": test_file_0907_2.h - IsComment In "GlobalScope" from "None" line 84": test_file_0907_2.h - IsEmptyLine In "GlobalScope" from "None" line 103": test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 104": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 105": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 106": - + test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 107": - + test_file_0907_2.h - IsEmptyLine In "GlobalScope" from "None" line 108": test_file_0907_2.h - IsComment In "GlobalScope" from "None" line 109": test_file_0907_2.h - IsEmptyLine In "GlobalScope" from "None" line 142": test_file_0907_2.h - IsPreprocessorStatement In "GlobalScope" from "None" line 143": - -test_file_0907_2.h: KO! - PREPROC_UKN_STATEMENT (line: 13, col: 1): Unrecognized preprocessor statement - PREPROC_UKN_STATEMENT (line: 38, col: 1): Unrecognized preprocessor statement + +test_file_0907_2.h: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/test_file_0907_3.c b/tests/rules/samples/test_file_0907_3.c similarity index 100% rename from norminette/tests/rules/test_file_0907_3.c rename to tests/rules/samples/test_file_0907_3.c diff --git a/norminette/tests/rules/test_file_0907_3.out b/tests/rules/samples/test_file_0907_3.out similarity index 80% rename from norminette/tests/rules/test_file_0907_3.out rename to tests/rules/samples/test_file_0907_3.out index e599cd31..ecb8f8ee 100644 --- a/norminette/tests/rules/test_file_0907_3.out +++ b/tests/rules/samples/test_file_0907_3.out @@ -38,7 +38,7 @@ test_file_0907_3.c - IsComment In "Function" from "GlobalScope" line 19": + warning. (not needed w/clang) */> test_file_0907_3.c - IsAssignation In "Function" from "GlobalScope" line 21": test_file_0907_3.c - IsEmptyLine In "Function" from "GlobalScope" line 22": @@ -121,9 +121,9 @@ test_file_0907_3.c - IsAssignation In "ControlStructure" from "Function" line 60": -test_file_0907_3.c - IsExpressionStatement In "ControlStructure" from "Function" line 61": +test_file_0907_3.c - IsAssignation In "ControlStructure" from "Function" line 61": -test_file_0907_3.c - IsExpressionStatement In "ControlStructure" from "Function" line 62": +test_file_0907_3.c - IsAssignation In "ControlStructure" from "Function" line 62": test_file_0907_3.c - IsBlockEnd In "ControlStructure" from "Function" line 63": @@ -141,7 +141,7 @@ test_file_0907_3.c - IsComment In "Function" from "GlobalScope" line 69": + playback(fscript);*/> test_file_0907_3.c - IsEmptyLine In "Function" from "GlobalScope" line 71": test_file_0907_3.c - IsControlStatement In "Function" from "GlobalScope" line 72": @@ -422,138 +422,130 @@ test_file_0907_3.c - IsBlockEnd In "Function" from "GlobalScope" line 201": -test_file_0907_3.c: KO! 
- TOO_MANY_TABS_FUNC (line: 1, col: 5): extra tabs before function name - SPACE_REPLACE_TAB (line: 4, col: 19): Found space when expecting tab - MULT_DECL_LINE (line: 4, col: 24): Multiple declarations on a single line - MULT_DECL_LINE (line: 6, col: 23): Multiple declarations on a single line - MULT_DECL_LINE (line: 7, col: 25): Multiple declarations on a single line - MULT_DECL_LINE (line: 11, col: 25): Multiple declarations on a single line - MULT_DECL_LINE (line: 11, col: 31): Multiple declarations on a single line - MULT_DECL_LINE (line: 11, col: 37): Multiple declarations on a single line - MULT_DECL_LINE (line: 11, col: 43): Multiple declarations on a single line - MULT_DECL_LINE (line: 11, col: 47): Multiple declarations on a single line - MULT_DECL_LINE (line: 11, col: 50): Multiple declarations on a single line - MULT_DECL_LINE (line: 12, col: 30): Multiple declarations on a single line - MULT_DECL_LINE (line: 13, col: 26): Multiple declarations on a single line - MULT_ASSIGN_LINE (line: 15, col: 17): Multiple assignations on a single line - MULT_ASSIGN_LINE (line: 15, col: 24): Multiple assignations on a single line - MULT_ASSIGN_LINE (line: 15, col: 31): Multiple assignations on a single line - TAB_INSTEAD_SPC (line: 19, col: 16): Found tab when expecting space - WRONG_SCOPE_COMMENT (line: 19, col: 17): Comment is invalid in this scope - WRONG_SCOPE_COMMENT (line: 19, col: 17): Comment is invalid in this scope - EMPTY_LINE_FUNCTION (line: 22, col: 1): Empty line in function - ASSIGN_IN_CONTROL (line: 23, col: 16): Assignment in control structure - FORBIDDEN_CS (line: 24, col: 9): Forbidden control structure - SPACE_AFTER_KW (line: 24, col: 9): Missing space after keyword - SPC_BFR_PAR (line: 24, col: 15): Missing space before parenthesis (brace/bracket) - NO_SPC_AFR_PAR (line: 24, col: 18): Extra space after parenthesis (brace/bracket) - MULT_IN_SINGLE_INSTR (line: 24, col: 20): Multiple instructions in single line control structure - TOO_FEW_TAB (line: 24, col: 20): Missing tabs for indent level - FORBIDDEN_CS (line: 25, col: 13): Forbidden control structure - SPC_AFTER_OPERATOR (line: 25, col: 21): missing space after operator - SPC_BFR_OPERATOR (line: 25, col: 21): missing space before operator - TOO_MANY_TAB (line: 27, col: 1): Extra tabs for indent level - SPACE_AFTER_KW (line: 27, col: 17): Missing space after keyword - FORBIDDEN_CS (line: 28, col: 13): Forbidden control structure - SPC_AFTER_OPERATOR (line: 28, col: 21): missing space after operator - SPC_BFR_OPERATOR (line: 28, col: 21): missing space before operator - TOO_MANY_TAB (line: 30, col: 1): Extra tabs for indent level - SPACE_AFTER_KW (line: 30, col: 17): Missing space after keyword - FORBIDDEN_CS (line: 31, col: 13): Forbidden control structure - SPC_AFTER_OPERATOR (line: 31, col: 21): missing space after operator - SPC_BFR_OPERATOR (line: 31, col: 21): missing space before operator - TOO_MANY_TAB (line: 33, col: 1): Extra tabs for indent level - SPACE_AFTER_KW (line: 33, col: 17): Missing space after keyword - FORBIDDEN_CS (line: 34, col: 13): Forbidden control structure - SPC_AFTER_OPERATOR (line: 34, col: 21): missing space after operator - SPC_BFR_OPERATOR (line: 34, col: 21): missing space before operator - TOO_MANY_TAB (line: 36, col: 1): Extra tabs for indent level - SPACE_AFTER_KW (line: 36, col: 17): Missing space after keyword - FORBIDDEN_CS (line: 37, col: 13): Forbidden control structure - SPC_AFTER_OPERATOR (line: 37, col: 21): missing space after operator - SPC_BFR_OPERATOR (line: 37, col: 21): 
missing space before operator - TOO_MANY_TAB (line: 39, col: 1): Extra tabs for indent level - SPACE_AFTER_KW (line: 39, col: 17): Missing space after keyword - FORBIDDEN_CS (line: 40, col: 13): Forbidden control structure - SPC_AFTER_OPERATOR (line: 40, col: 21): missing space after operator - SPC_BFR_OPERATOR (line: 40, col: 21): missing space before operator - TOO_MANY_TAB (line: 42, col: 1): Extra tabs for indent level - SPACE_AFTER_KW (line: 42, col: 17): Missing space after keyword - FORBIDDEN_CS (line: 43, col: 13): Forbidden control structure - SPC_AFTER_OPERATOR (line: 43, col: 21): missing space after operator - SPC_BFR_OPERATOR (line: 43, col: 21): missing space before operator - TOO_MANY_TAB (line: 45, col: 1): Extra tabs for indent level - SPACE_AFTER_KW (line: 45, col: 17): Missing space after keyword - FORBIDDEN_CS (line: 46, col: 13): Forbidden control structure - SPC_AFTER_OPERATOR (line: 46, col: 21): missing space after operator - SPC_BFR_OPERATOR (line: 46, col: 21): missing space before operator - TOO_MANY_TAB (line: 48, col: 1): Extra tabs for indent level - TOO_MANY_TAB (line: 49, col: 1): Extra tabs for indent level - TOO_MANY_TAB (line: 50, col: 1): Extra tabs for indent level - SPACE_AFTER_KW (line: 50, col: 17): Missing space after keyword - FORBIDDEN_CS (line: 51, col: 13): Forbidden control structure - SPC_AFTER_OPERATOR (line: 51, col: 21): missing space after operator - SPC_BFR_OPERATOR (line: 51, col: 21): missing space before operator - TOO_FEW_TAB (line: 52, col: 1): Missing tabs for indent level - SPACE_AFTER_KW (line: 52, col: 13): Missing space after keyword - SPC_AFTER_OPERATOR (line: 52, col: 20): missing space after operator - SPC_BFR_OPERATOR (line: 52, col: 20): missing space before operator - TOO_FEW_TAB (line: 53, col: 1): Missing tabs for indent level - EMPTY_LINE_FUNCTION (line: 57, col: 1): Empty line in function - BRACE_SHOULD_EOL (line: 63, col: 6): Expected newline after brace - TOO_FEW_TAB (line: 63, col: 7): Missing tabs for indent level - TOO_MANY_INSTR (line: 63, col: 7): Too many instructions on a single line - VAR_DECL_START_FUNC (line: 65, col: 1): Variable declaration not at start of function - MISALIGNED_VAR_DECL (line: 65, col: 13): Misaligned variable declaration - DECL_ASSIGN_LINE (line: 65, col: 20): Declaration and assignation on a single line - SPC_AFTER_OPERATOR (line: 65, col: 54): missing space after operator - SPC_BFR_OPERATOR (line: 65, col: 54): missing space before operator - SPC_AFTER_OPERATOR (line: 65, col: 61): missing space after operator - SPC_BFR_OPERATOR (line: 65, col: 61): missing space before operator - SPC_AFTER_OPERATOR (line: 65, col: 80): missing space after operator - SPC_BFR_OPERATOR (line: 65, col: 80): missing space before operator - SPC_AFTER_OPERATOR (line: 65, col: 87): missing space after operator - SPC_BFR_OPERATOR (line: 65, col: 87): missing space before operator - LINE_TOO_LONG (line: 65, col: 96): line too long - ASSIGN_IN_CONTROL (line: 66, col: 19): Assignment in control structure - EMPTY_LINE_FUNCTION (line: 68, col: 1): Empty line in function - WRONG_SCOPE_COMMENT (line: 69, col: 5): Comment is invalid in this scope - EMPTY_LINE_FUNCTION (line: 71, col: 1): Empty line in function - ASSIGN_IN_CONTROL (line: 72, col: 17): Assignment in control structure - NO_SPC_AFR_PAR (line: 72, col: 49): Extra space after parenthesis (brace/bracket) - TOO_FEW_TAB (line: 72, col: 51): Missing tabs for indent level - BRACE_SHOULD_EOL (line: 79, col: 6): Expected newline after brace - TOO_FEW_TAB (line: 
79, col: 7): Missing tabs for indent level - TOO_MANY_INSTR (line: 79, col: 7): Too many instructions on a single line - TOO_FEW_TAB (line: 79, col: 12): Missing tabs for indent level - EMPTY_LINE_FUNCTION (line: 83, col: 1): Empty line in function - EMPTY_LINE_FUNCTION (line: 86, col: 1): Empty line in function - WRONG_SCOPE_COMMENT (line: 92, col: 9): Comment is invalid in this scope - NO_SPC_AFR_PAR (line: 93, col: 20): Extra space after parenthesis (brace/bracket) - TOO_FEW_TAB (line: 93, col: 22): Missing tabs for indent level - WRONG_SCOPE_COMMENT (line: 96, col: 13): Comment is invalid in this scope - NO_SPC_AFR_PAR (line: 97, col: 24): Extra space after parenthesis (brace/bracket) - TOO_FEW_TAB (line: 97, col: 26): Missing tabs for indent level - FORBIDDEN_CS (line: 100, col: 17): Forbidden control structure - NO_SPC_BFR_OPR (line: 100, col: 28): extra space before operator - NO_SPC_AFR_PAR (line: 100, col: 36): Extra space after parenthesis (brace/bracket) - NO_SPC_BFR_OPR (line: 100, col: 38): extra space before operator - NO_SPC_AFR_PAR (line: 100, col: 43): Extra space after parenthesis (brace/bracket) - TOO_FEW_TAB (line: 100, col: 45): Missing tabs for indent level - EMPTY_LINE_FUNCTION (line: 116, col: 1): Empty line in function - NO_SPC_AFR_PAR (line: 118, col: 18): Extra space after parenthesis (brace/bracket) - TOO_FEW_TAB (line: 118, col: 20): Missing tabs for indent level - EMPTY_LINE_FUNCTION (line: 127, col: 1): Empty line in function - MULT_ASSIGN_LINE (line: 128, col: 18): Multiple assignations on a single line - SPACE_AFTER_KW (line: 155, col: 13): Missing space after keyword - NO_SPC_AFR_PAR (line: 156, col: 50): Extra space after parenthesis (brace/bracket) - TOO_FEW_TAB (line: 156, col: 52): Missing tabs for indent level - SPACE_AFTER_KW (line: 159, col: 17): Missing space after keyword - LINE_TOO_LONG (line: 162, col: 93): line too long - NO_SPC_AFR_PAR (line: 168, col: 23): Extra space after parenthesis (brace/bracket) - TOO_FEW_TAB (line: 168, col: 25): Missing tabs for indent level - LINE_TOO_LONG (line: 172, col: 101): line too long - SPACE_AFTER_KW (line: 182, col: 17): Missing space after keyword - BRACE_SHOULD_EOL (line: 201, col: 1): Expected newline after brace - TOO_MANY_LINES (line: 201, col: 1): Function has more than 25 lines +test_file_0907_3.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: TOO_MANY_TABS_FUNC (line: 1, col: 5): extra tabs before function name +Error: MIXED_SPACE_TAB (line: 4, col: 19): Mixed spaces and tabs +Error: SPACE_REPLACE_TAB (line: 4, col: 19): Found space when expecting tab +Error: MULT_DECL_LINE (line: 4, col: 24): Multiple declarations on a single line +Error: MULT_DECL_LINE (line: 6, col: 23): Multiple declarations on a single line +Error: MULT_DECL_LINE (line: 7, col: 25): Multiple declarations on a single line +Error: TOO_MANY_VARS_FUNC (line: 8, col: 1): Too many variables declarations in a function +Error: TOO_MANY_VARS_FUNC (line: 9, col: 1): Too many variables declarations in a function +Error: TOO_MANY_VARS_FUNC (line: 10, col: 1): Too many variables declarations in a function +Error: TOO_MANY_VARS_FUNC (line: 11, col: 1): Too many variables declarations in a function +Error: MULT_DECL_LINE (line: 11, col: 25): Multiple declarations on a single line +Error: FORBIDDEN_CHAR_NAME (line: 11, col: 27): user defined identifiers should contain only lowercase characters, digits or '_' +Error: MULT_DECL_LINE (line: 11, col: 31): Multiple declarations on a single line +Error: MULT_DECL_LINE (line: 11, col: 37): Multiple declarations on a single line +Error: MULT_DECL_LINE (line: 11, col: 43): Multiple declarations on a single line +Error: MULT_DECL_LINE (line: 11, col: 47): Multiple declarations on a single line +Error: MULT_DECL_LINE (line: 11, col: 50): Multiple declarations on a single line +Error: TOO_MANY_VARS_FUNC (line: 12, col: 1): Too many variables declarations in a function +Error: MULT_DECL_LINE (line: 12, col: 30): Multiple declarations on a single line +Error: TOO_MANY_VARS_FUNC (line: 13, col: 1): Too many variables declarations in a function +Error: MULT_DECL_LINE (line: 13, col: 26): Multiple declarations on a single line +Error: MULT_ASSIGN_LINE (line: 15, col: 17): Multiple assignations on a single line +Error: MULT_ASSIGN_LINE (line: 15, col: 24): Multiple assignations on a single line +Error: MULT_ASSIGN_LINE (line: 15, col: 31): Multiple assignations on a single line +Error: TAB_INSTEAD_SPC (line: 19, col: 16): Found tab when expecting space +Error: WRONG_SCOPE_COMMENT (line: 19, col: 17): Comment is invalid in this scope +Error: WRONG_SCOPE_COMMENT (line: 19, col: 17): Comment is invalid in this scope +Error: EMPTY_LINE_FUNCTION (line: 22, col: 1): Empty line in function +Error: ASSIGN_IN_CONTROL (line: 23, col: 16): Assignment in control structure +Error: FORBIDDEN_CS (line: 24, col: 9): Forbidden control structure +Error: SPACE_AFTER_KW (line: 24, col: 9): Missing space after keyword +Error: SPC_BFR_PAR (line: 24, col: 15): Missing space before parenthesis (brace/bracket) +Error: NO_SPC_AFR_PAR (line: 24, col: 18): Extra space after parenthesis (brace/bracket) +Error: MULT_IN_SINGLE_INSTR (line: 24, col: 20): Multiple instructions in single line control structure +Error: TOO_FEW_TAB (line: 24, col: 20): Missing tabs for indent level +Error: FORBIDDEN_CS (line: 25, col: 13): Forbidden control structure +Error: TOO_MANY_TAB (line: 27, col: 1): Extra tabs for indent level +Error: SPACE_AFTER_KW (line: 27, col: 17): Missing space after keyword +Error: FORBIDDEN_CS (line: 28, col: 13): Forbidden control structure +Error: TOO_MANY_TAB (line: 30, col: 1): Extra tabs for indent level +Error: SPACE_AFTER_KW (line: 30, col: 17): Missing space after keyword +Error: FORBIDDEN_CS (line: 31, col: 13): Forbidden control structure +Error: TOO_MANY_TAB (line: 33, 
col: 1): Extra tabs for indent level +Error: SPACE_AFTER_KW (line: 33, col: 17): Missing space after keyword +Error: FORBIDDEN_CS (line: 34, col: 13): Forbidden control structure +Error: TOO_MANY_TAB (line: 36, col: 1): Extra tabs for indent level +Error: SPACE_AFTER_KW (line: 36, col: 17): Missing space after keyword +Error: FORBIDDEN_CS (line: 37, col: 13): Forbidden control structure +Error: TOO_MANY_TAB (line: 39, col: 1): Extra tabs for indent level +Error: SPACE_AFTER_KW (line: 39, col: 17): Missing space after keyword +Error: FORBIDDEN_CS (line: 40, col: 13): Forbidden control structure +Error: TOO_MANY_TAB (line: 42, col: 1): Extra tabs for indent level +Error: SPACE_AFTER_KW (line: 42, col: 17): Missing space after keyword +Error: FORBIDDEN_CS (line: 43, col: 13): Forbidden control structure +Error: TOO_MANY_TAB (line: 45, col: 1): Extra tabs for indent level +Error: SPACE_AFTER_KW (line: 45, col: 17): Missing space after keyword +Error: FORBIDDEN_CS (line: 46, col: 13): Forbidden control structure +Error: TOO_MANY_TAB (line: 48, col: 1): Extra tabs for indent level +Error: TOO_MANY_TAB (line: 49, col: 1): Extra tabs for indent level +Error: TOO_MANY_TAB (line: 50, col: 1): Extra tabs for indent level +Error: SPACE_AFTER_KW (line: 50, col: 17): Missing space after keyword +Error: FORBIDDEN_CS (line: 51, col: 13): Forbidden control structure +Error: TOO_FEW_TAB (line: 52, col: 1): Missing tabs for indent level +Error: SPACE_AFTER_KW (line: 52, col: 13): Missing space after keyword +Error: TOO_FEW_TAB (line: 53, col: 1): Missing tabs for indent level +Error: EMPTY_LINE_FUNCTION (line: 57, col: 1): Empty line in function +Error: BRACE_SHOULD_EOL (line: 63, col: 6): Expected newline after brace +Error: TOO_FEW_TAB (line: 63, col: 7): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 63, col: 7): Too many instructions on a single line +Error: TOO_MANY_VARS_FUNC (line: 65, col: 1): Too many variables declarations in a function +Error: VAR_DECL_START_FUNC (line: 65, col: 1): Variable declaration not at start of function +Error: MISALIGNED_VAR_DECL (line: 65, col: 13): Misaligned variable declaration +Error: DECL_ASSIGN_LINE (line: 65, col: 20): Declaration and assignation on a single line +Error: TERNARY_FBIDDEN (line: 65, col: 27): Ternaries are forbidden +Error: TERNARY_FBIDDEN (line: 65, col: 45): Ternaries are forbidden +Error: SPC_AFTER_OPERATOR (line: 65, col: 54): missing space after operator +Error: SPC_BFR_OPERATOR (line: 65, col: 54): missing space before operator +Error: SPC_AFTER_OPERATOR (line: 65, col: 61): missing space after operator +Error: SPC_BFR_OPERATOR (line: 65, col: 61): missing space before operator +Error: SPC_AFTER_OPERATOR (line: 65, col: 80): missing space after operator +Error: SPC_BFR_OPERATOR (line: 65, col: 80): missing space before operator +Error: LINE_TOO_LONG (line: 65, col: 87): line too long +Error: SPC_AFTER_OPERATOR (line: 65, col: 87): missing space after operator +Error: SPC_BFR_OPERATOR (line: 65, col: 87): missing space before operator +Error: ASSIGN_IN_CONTROL (line: 66, col: 19): Assignment in control structure +Error: EMPTY_LINE_FUNCTION (line: 68, col: 1): Empty line in function +Error: WRONG_SCOPE_COMMENT (line: 69, col: 5): Comment is invalid in this scope +Error: EMPTY_LINE_FUNCTION (line: 71, col: 1): Empty line in function +Error: ASSIGN_IN_CONTROL (line: 72, col: 17): Assignment in control structure +Error: NO_SPC_AFR_PAR (line: 72, col: 49): Extra space after parenthesis (brace/bracket) +Error: TOO_FEW_TAB (line: 72, col: 
51): Missing tabs for indent level +Error: BRACE_SHOULD_EOL (line: 79, col: 6): Expected newline after brace +Error: TOO_FEW_TAB (line: 79, col: 7): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 79, col: 7): Too many instructions on a single line +Error: TOO_FEW_TAB (line: 79, col: 12): Missing tabs for indent level +Error: EMPTY_LINE_FUNCTION (line: 83, col: 1): Empty line in function +Error: EMPTY_LINE_FUNCTION (line: 86, col: 1): Empty line in function +Error: WRONG_SCOPE_COMMENT (line: 92, col: 9): Comment is invalid in this scope +Error: NO_SPC_AFR_PAR (line: 93, col: 20): Extra space after parenthesis (brace/bracket) +Error: TOO_FEW_TAB (line: 93, col: 22): Missing tabs for indent level +Error: WRONG_SCOPE_COMMENT (line: 96, col: 13): Comment is invalid in this scope +Error: NO_SPC_AFR_PAR (line: 97, col: 24): Extra space after parenthesis (brace/bracket) +Error: TOO_FEW_TAB (line: 97, col: 26): Missing tabs for indent level +Error: FORBIDDEN_CS (line: 100, col: 17): Forbidden control structure +Error: NO_SPC_BFR_OPR (line: 100, col: 28): extra space before operator +Error: NO_SPC_AFR_PAR (line: 100, col: 36): Extra space after parenthesis (brace/bracket) +Error: NO_SPC_BFR_OPR (line: 100, col: 38): extra space before operator +Error: NO_SPC_AFR_PAR (line: 100, col: 43): Extra space after parenthesis (brace/bracket) +Error: TOO_FEW_TAB (line: 100, col: 45): Missing tabs for indent level +Error: EMPTY_LINE_FUNCTION (line: 116, col: 1): Empty line in function +Error: NO_SPC_AFR_PAR (line: 118, col: 18): Extra space after parenthesis (brace/bracket) +Error: TOO_FEW_TAB (line: 118, col: 20): Missing tabs for indent level +Error: EMPTY_LINE_FUNCTION (line: 127, col: 1): Empty line in function +Error: MULT_ASSIGN_LINE (line: 128, col: 18): Multiple assignations on a single line +Error: SPACE_AFTER_KW (line: 155, col: 13): Missing space after keyword +Error: NO_SPC_AFR_PAR (line: 156, col: 50): Extra space after parenthesis (brace/bracket) +Error: TOO_FEW_TAB (line: 156, col: 52): Missing tabs for indent level +Error: SPACE_AFTER_KW (line: 159, col: 17): Missing space after keyword +Error: LINE_TOO_LONG (line: 162, col: 86): line too long +Error: NO_SPC_AFR_PAR (line: 168, col: 23): Extra space after parenthesis (brace/bracket) +Error: TOO_FEW_TAB (line: 168, col: 25): Missing tabs for indent level +Error: LINE_TOO_LONG (line: 172, col: 86): line too long +Error: SPACE_AFTER_KW (line: 182, col: 17): Missing space after keyword +Error: BRACE_SHOULD_EOL (line: 201, col: 1): Expected newline after brace +Error: TOO_MANY_LINES (line: 201, col: 1): Function has more than 25 lines diff --git a/norminette/tests/rules/test_file_0907_4.c b/tests/rules/samples/test_file_0907_4.c similarity index 100% rename from norminette/tests/rules/test_file_0907_4.c rename to tests/rules/samples/test_file_0907_4.c diff --git a/norminette/tests/rules/test_file_0907_4.out b/tests/rules/samples/test_file_0907_4.out similarity index 77% rename from norminette/tests/rules/test_file_0907_4.out rename to tests/rules/samples/test_file_0907_4.out index aeed7fc5..06c7aef3 100644 --- a/norminette/tests/rules/test_file_0907_4.out +++ b/tests/rules/samples/test_file_0907_4.out @@ -8,6 +8,7 @@ test_file_0907_4.c - IsBlockEnd In "Function" from "GlobalScope" line 5": -test_file_0907_4.c: KO! - EMPTY_LINE_FILE_START (line: 1, col: 1): Empty line at start of file - BRACE_SHOULD_EOL (line: 5, col: 1): Expected newline after brace +test_file_0907_4.c: Error! 
+Error: EMPTY_LINE_FILE_START (line: 1, col: 1): Empty line at start of file +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: BRACE_SHOULD_EOL (line: 5, col: 1): Expected newline after brace diff --git a/norminette/tests/rules/test_file_0924.c b/tests/rules/samples/test_file_0924.c similarity index 100% rename from norminette/tests/rules/test_file_0924.c rename to tests/rules/samples/test_file_0924.c diff --git a/norminette/tests/rules/test_file_0924.out b/tests/rules/samples/test_file_0924.out similarity index 66% rename from norminette/tests/rules/test_file_0924.out rename to tests/rules/samples/test_file_0924.out index 6d913b0f..b568219b 100644 --- a/norminette/tests/rules/test_file_0924.out +++ b/tests/rules/samples/test_file_0924.out @@ -22,16 +22,17 @@ test_file_0924.c - IsEmptyLine In "GlobalScope" from "None" line 9": -test_file_0924.c: KO! - SPACE_BEFORE_FUNC (line: 1, col: 5): space before function name - BRACE_NEWLINE (line: 1, col: 47): Expected newline before brace - TOO_MANY_TAB (line: 5, col: 1): Extra tabs for indent level - BRACE_SHOULD_EOL (line: 5, col: 13): Expected newline after brace - TOO_FEW_TAB (line: 5, col: 14): Missing tabs for indent level - TOO_MANY_INSTR (line: 5, col: 14): Too many instructions on a single line - ASSIGN_IN_CONTROL (line: 5, col: 22): Assignment in control structure - TOO_FEW_TAB (line: 6, col: 1): Missing tabs for indent level - NO_SPC_BFR_OPR (line: 6, col: 20): extra space before operator - TOO_FEW_TAB (line: 6, col: 21): Missing tabs for indent level - TOO_MANY_INSTR (line: 6, col: 21): Too many instructions on a single line - EMPTY_LINE_EOF (line: 9, col: 1): Empty line at end of file +test_file_0924.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: SPACE_BEFORE_FUNC (line: 1, col: 5): Found space when expecting tab before function name +Error: BRACE_NEWLINE (line: 1, col: 47): Expected newline before brace +Error: TOO_MANY_TAB (line: 5, col: 1): Extra tabs for indent level +Error: BRACE_SHOULD_EOL (line: 5, col: 13): Expected newline after brace +Error: TOO_FEW_TAB (line: 5, col: 14): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 5, col: 14): Too many instructions on a single line +Error: ASSIGN_IN_CONTROL (line: 5, col: 22): Assignment in control structure +Error: TOO_FEW_TAB (line: 6, col: 1): Missing tabs for indent level +Error: NO_SPC_BFR_OPR (line: 6, col: 20): extra space before operator +Error: TOO_FEW_TAB (line: 6, col: 21): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 6, col: 21): Too many instructions on a single line +Error: EMPTY_LINE_EOF (line: 9, col: 1): Empty line at end of file diff --git a/norminette/tests/rules/test_file_1012.c b/tests/rules/samples/test_file_1012.c similarity index 100% rename from norminette/tests/rules/test_file_1012.c rename to tests/rules/samples/test_file_1012.c diff --git a/norminette/tests/rules/test_file_1012.out b/tests/rules/samples/test_file_1012.out similarity index 99% rename from norminette/tests/rules/test_file_1012.out rename to tests/rules/samples/test_file_1012.out index 30bcafa7..05a7be34 100644 --- a/norminette/tests/rules/test_file_1012.out +++ b/tests/rules/samples/test_file_1012.out @@ -23,7 +23,7 @@ test_file_1012.c - IsEmptyLine In "GlobalScope" from "None" line 12": test_file_1012.c - IsPreprocessorStatement In "GlobalScope" from "None" line 13": - + test_file_1012.c - IsEmptyLine In "GlobalScope" from "None" line 14": test_file_1012.c - IsFuncDeclaration In "GlobalScope" from 
"None" line 15": @@ -187,4 +187,5 @@
test_file_1012.c - IsBlockEnd In "Function" from "GlobalScope" line 99": -test_file_1012.c: OK! +test_file_1012.c: Error! +Error: SPACE_AFTER_KW (line: 71, col: 15): Missing space after keyword diff --git a/norminette/tests/rules/test_file_1012_2.c b/tests/rules/samples/test_file_1012_2.c similarity index 100% rename from norminette/tests/rules/test_file_1012_2.c rename to tests/rules/samples/test_file_1012_2.c diff --git a/norminette/tests/rules/test_file_1012_2.out b/tests/rules/samples/test_file_1012_2.out similarity index 98% rename from norminette/tests/rules/test_file_1012_2.out rename to tests/rules/samples/test_file_1012_2.out index 5715b59d..7d670e28 100644 --- a/norminette/tests/rules/test_file_1012_2.out +++ b/tests/rules/samples/test_file_1012_2.out @@ -23,7 +23,7 @@ test_file_1012_2.c - IsEmptyLine In "GlobalScope" from "None" line 12": test_file_1012_2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 13": - + test_file_1012_2.c - IsEmptyLine In "GlobalScope" from "None" line 14": test_file_1012_2.c - IsFuncDeclaration In "GlobalScope" from "None" line 15": @@ -153,6 +153,6 @@ test_file_1012_2.c - IsBlockEnd In "Function" from "GlobalScope" line 82": -test_file_1012_2.c: KO! - TOO_FEW_TAB (line: 47, col: 1): Missing tabs for indent level - TOO_FEW_TAB (line: 51, col: 1): Missing tabs for indent level +test_file_1012_2.c: Error! +Error: TOO_FEW_TAB (line: 47, col: 1): Missing tabs for indent level +Error: TOO_FEW_TAB (line: 51, col: 1): Missing tabs for indent level diff --git a/norminette/tests/rules/test_file_1012_3.h b/tests/rules/samples/test_file_1012_3.h similarity index 100% rename from norminette/tests/rules/test_file_1012_3.h rename to tests/rules/samples/test_file_1012_3.h diff --git a/norminette/tests/rules/test_file_1012_3.out b/tests/rules/samples/test_file_1012_3.out similarity index 85% rename from norminette/tests/rules/test_file_1012_3.out rename to tests/rules/samples/test_file_1012_3.out index 0716d127..c1fe3630 100644 --- a/norminette/tests/rules/test_file_1012_3.out +++ b/tests/rules/samples/test_file_1012_3.out @@ -23,13 +23,13 @@ test_file_1012_3.h - IsEmptyLine In "GlobalScope" from "None" line 12": test_file_1012_3.h - IsPreprocessorStatement In "GlobalScope" from "None" line 13": - + test_file_1012_3.h - IsPreprocessorStatement In "GlobalScope" from "None" line 14": - + test_file_1012_3.h - IsEmptyLine In "GlobalScope" from "None" line 15": test_file_1012_3.h - IsPreprocessorStatement In "GlobalScope" from "None" line 16": - + test_file_1012_3.h - IsEmptyLine In "GlobalScope" from "None" line 17": test_file_1012_3.h - IsVarDeclaration In "GlobalScope" from "None" line 18": @@ -39,9 +39,9 @@ test_file_1012_3.h - IsComment In "GlobalScope" from "None" line 20": test_file_1012_3.h - IsEmptyLine In "GlobalScope" from "None" line 26": @@ -62,8 +62,8 @@ test_file_1012_3.h - IsComment In "GlobalScope" from "None" line 35": test_file_1012_3.h - IsEmptyLine In "GlobalScope" from "None" line 40": @@ -74,13 +74,13 @@ test_file_1012_3.h - IsComment In "GlobalScope" from "None" line 43": test_file_1012_3.h - IsEmptyLine In "GlobalScope" from "None" line 53": @@ -91,12 +91,12 @@ test_file_1012_3.h - IsComment In "GlobalScope" from "None" line 56": test_file_1012_3.h - IsEmptyLine In "GlobalScope" from "None" line 65": @@ -108,8 +108,8 @@ test_file_1012_3.h - IsEmptyLine In "GlobalScope" from "None" line 75": @@ -119,5 +119,6 @@ test_file_1012_3.h - IsEmptyLine In "GlobalScope" from "None" line 77": test_file_1012_3.h - IsPreprocessorStatement In 
"GlobalScope" from "None" line 78": - + test_file_1012_3.h: OK! +Notice: GLOBAL_VAR_DETECTED (line: 18, col: 1): Global variable present in file. Make sure it is a reasonable choice. diff --git a/norminette/tests/rules/test_file_1012_4.c b/tests/rules/samples/test_file_1012_4.c similarity index 100% rename from norminette/tests/rules/test_file_1012_4.c rename to tests/rules/samples/test_file_1012_4.c diff --git a/tests/rules/samples/test_file_1012_4.out b/tests/rules/samples/test_file_1012_4.out new file mode 100644 index 00000000..901566b1 --- /dev/null +++ b/tests/rules/samples/test_file_1012_4.out @@ -0,0 +1,8 @@ +test_file_1012_4.c - IsVarDeclaration In "GlobalScope" from "None" line 1": + +test_file_1012_4.c - IsVarDeclaration In "GlobalScope" from "None" line 2": + +test_file_1012_4.c: Error! +Notice: GLOBAL_VAR_DETECTED (line: 1, col: 1): Global variable present in file. Make sure it is a reasonable choice. +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Notice: GLOBAL_VAR_DETECTED (line: 2, col: 1): Global variable present in file. Make sure it is a reasonable choice. diff --git a/norminette/tests/rules/test_file_1019.c b/tests/rules/samples/test_file_1019.c similarity index 100% rename from norminette/tests/rules/test_file_1019.c rename to tests/rules/samples/test_file_1019.c diff --git a/norminette/tests/rules/test_file_1019.out b/tests/rules/samples/test_file_1019.out similarity index 84% rename from norminette/tests/rules/test_file_1019.out rename to tests/rules/samples/test_file_1019.out index dbddffe9..d53d6efe 100644 --- a/norminette/tests/rules/test_file_1019.out +++ b/tests/rules/samples/test_file_1019.out @@ -100,7 +100,7 @@ test_file_1019.c - IsFunctionCall In "Function" from "GlobalScope" line 51": -test_file_1019.c - IsDeclaration In "Function" from "GlobalScope" line 52": +test_file_1019.c - IsLabel In "Function" from "GlobalScope" line 52": test_file_1019.c - IsExpressionStatement In "Function" from "GlobalScope" line 53": @@ -160,19 +160,28 @@ test_file_1019.c - IsBlockEnd In "Function" from "GlobalScope" line 81": -test_file_1019.c: KO! - RETURN_PARENTHESIS (line: 8, col: 12): Return value must be in parenthesis - COMMENT_ON_INSTR (line: 35, col: 25): Comment must be on its own line - WRONG_SCOPE_COMMENT (line: 41, col: 10): Comment is invalid in this scope - COMMENT_ON_INSTR (line: 41, col: 29): Comment must be on its own line - WRONG_SCOPE_COMMENT (line: 42, col: 10): Comment is invalid in this scope - COMMENT_ON_INSTR (line: 42, col: 29): Comment must be on its own line - TOO_MANY_FUNCS (line: 46, col: 1): Too many functions in file - TOO_MANY_FUNCS (line: 56, col: 1): Too many functions in file - WRONG_SCOPE_COMMENT (line: 62, col: 27): Comment is invalid in this scope - COMMENT_ON_INSTR (line: 62, col: 47): Comment must be on its own line - TOO_MANY_FUNCS (line: 66, col: 1): Too many functions in file - WRONG_SCOPE_COMMENT (line: 70, col: 14): Comment is invalid in this scope - COMMENT_ON_INSTR (line: 70, col: 32): Comment must be on its own line - TOO_MANY_FUNCS (line: 73, col: 1): Too many functions in file - TOO_MANY_FUNCS (line: 78, col: 1): Too many functions in file +test_file_1019.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: RETURN_PARENTHESIS (line: 8, col: 12): Return value must be in parenthesis +Error: FORBIDDEN_STRUCT (line: 11, col: 1): Struct declaration are not allowed in .c files +Error: FORBIDDEN_CHAR_NAME (line: 28, col: 9): user defined identifiers should contain only lowercase characters, digits or '_' +Error: COMMENT_ON_INSTR (line: 35, col: 6): Comment must be on its own line or at end of a line +Error: COMMENT_ON_INSTR (line: 41, col: 10): Comment must be on its own line or at end of a line +Error: WRONG_SCOPE_COMMENT (line: 41, col: 10): Comment is invalid in this scope +Error: COMMENT_ON_INSTR (line: 42, col: 10): Comment must be on its own line or at end of a line +Error: WRONG_SCOPE_COMMENT (line: 42, col: 10): Comment is invalid in this scope +Error: TOO_MANY_FUNCS (line: 46, col: 1): Too many functions in file +Error: GOTO_FBIDDEN (line: 50, col: 1): Goto statements are forbidden +Error: LABEL_FBIDDEN (line: 52, col: 1): Label statements are forbidden +Error: TOO_MANY_FUNCS (line: 56, col: 1): Too many functions in file +Error: COMMENT_ON_INSTR (line: 62, col: 27): Comment must be on its own line or at end of a line +Error: WRONG_SCOPE_COMMENT (line: 62, col: 27): Comment is invalid in this scope +Error: TOO_MANY_FUNCS (line: 66, col: 1): Too many functions in file +Error: COMMENT_ON_INSTR (line: 70, col: 14): Comment must be on its own line or at end of a line +Error: WRONG_SCOPE_COMMENT (line: 70, col: 14): Comment is invalid in this scope +Error: TOO_MANY_FUNCS (line: 73, col: 1): Too many functions in file +Error: SPC_BEFORE_NL (line: 73, col: 17): Space before newline +Error: TOO_MANY_FUNCS (line: 78, col: 1): Too many functions in file +Error: SPC_BEFORE_NL (line: 78, col: 17): Space before newline +Error: SPC_BEFORE_NL (line: 79, col: 2): Space before newline +Error: SPC_BEFORE_NL (line: 81, col: 2): Space before newline diff --git a/norminette/tests/rules/test_file_1019_1.c b/tests/rules/samples/test_file_1019_1.c similarity index 100% rename from norminette/tests/rules/test_file_1019_1.c rename to tests/rules/samples/test_file_1019_1.c diff --git a/norminette/tests/rules/test_file_1019_1.out b/tests/rules/samples/test_file_1019_1.out similarity index 80% rename from norminette/tests/rules/test_file_1019_1.out rename to tests/rules/samples/test_file_1019_1.out index 8f8e9ce4..b456b3f9 100644 --- a/norminette/tests/rules/test_file_1019_1.out +++ b/tests/rules/samples/test_file_1019_1.out @@ -1,7 +1,7 @@ test_file_1019_1.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": - + test_file_1019_1.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": - + test_file_1019_1.c - IsEmptyLine In "GlobalScope" from "None" line 3": test_file_1019_1.c - IsUserDefinedType In "GlobalScope" from "None" line 4": @@ -28,4 +28,8 @@ test_file_1019_1.c - IsBlockEnd In "Function" from "GlobalScope" line 15": -test_file_1019_1.c: OK! +test_file_1019_1.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: PREPROC_BAD_IFNDEF (line: 1, col: 1): Ifndef preprocessor statement without endif +Error: FORBIDDEN_TYPEDEF (line: 4, col: 1): Typedef declaration are not allowed in .c files +Error: FORBIDDEN_STRUCT (line: 5, col: 1): Struct declaration are not allowed in .c files diff --git a/norminette/tests/rules/test_file_1022.c b/tests/rules/samples/test_file_1022.c similarity index 100% rename from norminette/tests/rules/test_file_1022.c rename to tests/rules/samples/test_file_1022.c diff --git a/norminette/tests/rules/test_file_1022.out b/tests/rules/samples/test_file_1022.out similarity index 83% rename from norminette/tests/rules/test_file_1022.out rename to tests/rules/samples/test_file_1022.out index d1f3d922..dff6f6eb 100644 --- a/norminette/tests/rules/test_file_1022.out +++ b/tests/rules/samples/test_file_1022.out @@ -6,7 +6,7 @@ test_file_1022.c - IsVarDeclaration In "UserDefinedType" from "GlobalScope" line 4": -test_file_1022.c - IsFuncPrototype In "UserDefinedType" from "GlobalScope" line 5": +test_file_1022.c - IsVarDeclaration In "UserDefinedType" from "GlobalScope" line 5": test_file_1022.c - IsVarDeclaration In "UserDefinedType" from "GlobalScope" line 6": @@ -26,5 +26,8 @@ test_file_1022.c - IsBlockEnd In "UserDefinedType" from "GlobalScope" line 14": -test_file_1022.c: KO! - SPACE_REPLACE_TAB (line: 9, col: 4): Found space when expecting tab +test_file_1022.c: Error! +Error: FORBIDDEN_STRUCT (line: 1, col: 1): Struct declaration are not allowed in .c files +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: SPACE_REPLACE_TAB (line: 9, col: 4): Found space when expecting tab +Error: FORBIDDEN_STRUCT (line: 11, col: 1): Struct declaration are not allowed in .c files diff --git a/norminette/tests/rules/test_file_1116.c b/tests/rules/samples/test_file_1116.c similarity index 100% rename from norminette/tests/rules/test_file_1116.c rename to tests/rules/samples/test_file_1116.c diff --git a/norminette/tests/rules/test_file_1116.out b/tests/rules/samples/test_file_1116.out similarity index 91% rename from norminette/tests/rules/test_file_1116.out rename to tests/rules/samples/test_file_1116.out index 12e4eab6..a0210f5e 100644 --- a/norminette/tests/rules/test_file_1116.out +++ b/tests/rules/samples/test_file_1116.out @@ -12,4 +12,5 @@ test_file_1116.c - IsBlockEnd In "Function" from "GlobalScope" line 7": -test_file_1116.c: OK! +test_file_1116.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/test_file_1116_1.c b/tests/rules/samples/test_file_1116_1.c similarity index 100% rename from norminette/tests/rules/test_file_1116_1.c rename to tests/rules/samples/test_file_1116_1.c diff --git a/norminette/tests/rules/test_file_1116_1.out b/tests/rules/samples/test_file_1116_1.out similarity index 84% rename from norminette/tests/rules/test_file_1116_1.out rename to tests/rules/samples/test_file_1116_1.out index edeec6ed..bfcdb932 100644 --- a/norminette/tests/rules/test_file_1116_1.out +++ b/tests/rules/samples/test_file_1116_1.out @@ -12,4 +12,6 @@ test_file_1116_1.c - IsBlockEnd In "UserDefinedEnum" from "GlobalScope" line 7": -test_file_1116_1.c: OK! +test_file_1116_1.c: Error! 
+Error: FORBIDDEN_ENUM (line: 1, col: 1): Enum declaration are not allowed in .c files +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/test_file_1116_2.c b/tests/rules/samples/test_file_1116_2.c similarity index 100% rename from norminette/tests/rules/test_file_1116_2.c rename to tests/rules/samples/test_file_1116_2.c diff --git a/norminette/tests/rules/test_file_1116_2.out b/tests/rules/samples/test_file_1116_2.out similarity index 89% rename from norminette/tests/rules/test_file_1116_2.out rename to tests/rules/samples/test_file_1116_2.out index e52295d5..eeea97bd 100644 --- a/norminette/tests/rules/test_file_1116_2.out +++ b/tests/rules/samples/test_file_1116_2.out @@ -18,4 +18,6 @@ test_file_1116_2.c - IsAssignation In "GlobalScope" from "None" line 10": -test_file_1116_2.c: OK! +test_file_1116_2.c: Error! +Error: FORBIDDEN_ENUM (line: 1, col: 1): Enum declaration are not allowed in .c files +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/test_file_1116_3.c b/tests/rules/samples/test_file_1116_3.c similarity index 100% rename from norminette/tests/rules/test_file_1116_3.c rename to tests/rules/samples/test_file_1116_3.c diff --git a/norminette/tests/rules/test_file_1116_3.out b/tests/rules/samples/test_file_1116_3.out similarity index 90% rename from norminette/tests/rules/test_file_1116_3.out rename to tests/rules/samples/test_file_1116_3.out index d82aa33d..d9f4eac9 100644 --- a/norminette/tests/rules/test_file_1116_3.out +++ b/tests/rules/samples/test_file_1116_3.out @@ -8,4 +8,5 @@ test_file_1116_3.c - IsBlockEnd In "Function" from "GlobalScope" line 5": -test_file_1116_3.c: OK! +test_file_1116_3.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/tests/rules/samples/test_file_1807.c b/tests/rules/samples/test_file_1807.c new file mode 100644 index 00000000..ec471177 --- /dev/null +++ b/tests/rules/samples/test_file_1807.c @@ -0,0 +1,15 @@ +int main(void) +{ + x() && y() || z(); + x(), y(), z(); + x(1, 2 && 3) ->x >>= y(1, 3, 4), z("hello") && 3; + ((((int********))))((x()))->back *= x((((int)))(x), 2, 3, 4); + main = main(main, main, main); + main-> if = x(("bla", "ble", "bli", "blo", "blu")); + x = ((&main->x->x()) && x(), 2); + (x = y->x, x->x)->x = "eita"; + (x)( + "e se for", "uma string grande", + ) -> x =(x) + ; +} diff --git a/tests/rules/samples/test_file_1807.out b/tests/rules/samples/test_file_1807.out new file mode 100644 index 00000000..47571d68 --- /dev/null +++ b/tests/rules/samples/test_file_1807.out @@ -0,0 +1,59 @@ +test_file_1807.c - IsFuncDeclaration In "GlobalScope" from "None" line 1": + +test_file_1807.c - IsBlockStart In "Function" from "GlobalScope" line 2": + +test_file_1807.c - IsFunctionCall In "Function" from "GlobalScope" line 3": + +test_file_1807.c - IsFunctionCall In "Function" from "GlobalScope" line 3": + +test_file_1807.c - IsFunctionCall In "Function" from "GlobalScope" line 3": + +test_file_1807.c - IsFunctionCall In "Function" from "GlobalScope" line 4": + +test_file_1807.c - IsFunctionCall In "Function" from "GlobalScope" line 4": + +test_file_1807.c - IsFunctionCall In "Function" from "GlobalScope" line 4": + +test_file_1807.c - IsFunctionCall In "Function" from "GlobalScope" line 5": + +test_file_1807.c - IsAssignation In "Function" from "GlobalScope" line 6": + +test_file_1807.c - IsAssignation In "Function" from "GlobalScope" line 7": + +test_file_1807.c - IsDeclaration In 
"Function" from "GlobalScope" line 8": + +test_file_1807.c - IsAssignation In "Function" from "GlobalScope" line 9": + +test_file_1807.c - IsAssignation In "Function" from "GlobalScope" line 10": + +test_file_1807.c - IsAssignation In "Function" from "GlobalScope" line 10": + +test_file_1807.c - IsDeclaration In "Function" from "GlobalScope" line 11": + + + + +test_file_1807.c - IsBlockEnd In "Function" from "GlobalScope" line 15": + +test_file_1807.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: TOO_FEW_TAB (line: 3, col: 12): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 3, col: 12): Too many instructions on a single line +Error: TOO_FEW_TAB (line: 3, col: 19): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 3, col: 19): Too many instructions on a single line +Error: TOO_FEW_TAB (line: 4, col: 10): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 4, col: 10): Too many instructions on a single line +Error: TOO_FEW_TAB (line: 4, col: 15): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 4, col: 15): Too many instructions on a single line +Error: NO_SPC_AFR_PAR (line: 5, col: 16): Extra space after parenthesis (brace/bracket) +Error: TAB_INSTEAD_SPC (line: 5, col: 17): Found tab when expecting space +Error: SPACE_AFTER_KW (line: 6, col: 9): Missing space after keyword +Error: TAB_INSTEAD_SPC (line: 8, col: 11): Found tab when expecting space +Error: TOO_FEW_TAB (line: 10, col: 16): Missing tabs for indent level +Error: TOO_FEW_TAB (line: 10, col: 16): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 10, col: 16): Too many instructions on a single line +Error: MULT_ASSIGN_LINE (line: 10, col: 25): Multiple assignations on a single line +Error: NO_SPC_AFR_PAR (line: 13, col: 5): Extra space after parenthesis (brace/bracket) +Error: TAB_INSTEAD_SPC (line: 13, col: 6): Found tab when expecting space +Error: SPC_BFR_OPERATOR (line: 13, col: 61): missing space before operator +Error: SPC_BFR_PAR (line: 13, col: 62): Missing space before parenthesis (brace/bracket) diff --git a/tests/rules/samples/test_file_2007.c b/tests/rules/samples/test_file_2007.c new file mode 100644 index 00000000..fab29cc3 --- /dev/null +++ b/tests/rules/samples/test_file_2007.c @@ -0,0 +1,16 @@ +int philo(int argc, char const *argv[], char *const envp[]) +{ + char *str; + + if (argc != 1) + { + ft_putstr("\033[0;33mUsage: ./philo number_of_philosophers time_to_die"); + ft_putstr("time_to_eat time_to_sleep [number_of_times_each_philosopher"); + ft_putstr("_must_eat]\033[0m\n"); + ft_putstr("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"); + ft_putstr("time_to_eat time_to_sleep [number_of_times_each_philosopher"); + } + if (c == ' ' & c == '\t' & c == '\n' & c == '\v' & c == '\f' & c == '\r') + return (1); + (void) argv; +} diff --git a/tests/rules/samples/test_file_2007.out b/tests/rules/samples/test_file_2007.out new file mode 100644 index 00000000..3e586220 --- /dev/null +++ b/tests/rules/samples/test_file_2007.out @@ -0,0 +1,39 @@ +test_file_2007.c - IsFuncDeclaration In "GlobalScope" from "None" line 1": + +test_file_2007.c - IsBlockStart In "Function" from "GlobalScope" line 2": + +test_file_2007.c - IsVarDeclaration In "Function" from "GlobalScope" line 3": + +test_file_2007.c - IsEmptyLine In "Function" from "GlobalScope" line 4": + +test_file_2007.c - IsControlStatement In "Function" from "GlobalScope" line 5": + +test_file_2007.c - IsBlockStart In "ControlStructure" 
from "Function" line 6": + +test_file_2007.c - IsFunctionCall In "ControlStructure" from "Function" line 7": + +test_file_2007.c - IsFunctionCall In "ControlStructure" from "Function" line 8": + +test_file_2007.c - IsFunctionCall In "ControlStructure" from "Function" line 9": + +test_file_2007.c - IsFunctionCall In "ControlStructure" from "Function" line 10": + +test_file_2007.c - IsFunctionCall In "ControlStructure" from "Function" line 11": + +test_file_2007.c - IsBlockEnd In "ControlStructure" from "Function" line 12": + +test_file_2007.c - IsControlStatement In "Function" from "GlobalScope" line 13": + +test_file_2007.c - IsExpressionStatement In "ControlStructure" from "Function" line 14": + +test_file_2007.c - IsExpressionStatement In "Function" from "GlobalScope" line 15": + +test_file_2007.c - IsBlockEnd In "Function" from "GlobalScope" line 16": + +test_file_2007.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: SPACE_REPLACE_TAB (line: 3, col: 9): Found space when expecting tab +Error: LINE_TOO_LONG (line: 7, col: 82): line too long +Error: LINE_TOO_LONG (line: 8, col: 82): line too long +Error: LINE_TOO_LONG (line: 10, col: 117): line too long +Error: LINE_TOO_LONG (line: 11, col: 82): line too long diff --git a/norminette/tests/rules/test_file_210128.c b/tests/rules/samples/test_file_210128.c similarity index 100% rename from norminette/tests/rules/test_file_210128.c rename to tests/rules/samples/test_file_210128.c diff --git a/norminette/tests/rules/test_file_210128.out b/tests/rules/samples/test_file_210128.out similarity index 74% rename from norminette/tests/rules/test_file_210128.out rename to tests/rules/samples/test_file_210128.out index e462862b..7686460b 100644 --- a/norminette/tests/rules/test_file_210128.out +++ b/tests/rules/samples/test_file_210128.out @@ -13,9 +13,9 @@ test_file_210128.c - IsComment In "Function" from "GlobalScope" line 6": + */> test_file_210128.c - IsAssignation In "Function" from "GlobalScope" line 10": test_file_210128.c - IsAssignation In "Function" from "GlobalScope" line 11": @@ -27,7 +27,7 @@ test_file_210128.c - IsAssignation In "ControlStructure" from "Function" line 15": -test_file_210128.c - IsExpressionStatement In "ControlStructure" from "Function" line 16": +test_file_210128.c - IsAssignation In "ControlStructure" from "Function" line 16": test_file_210128.c - IsBlockEnd In "ControlStructure" from "Function" line 17": @@ -37,14 +37,14 @@ test_file_210128.c - IsBlockEnd In "Function" from "GlobalScope" line 19": -test_file_210128.c: KO! - WRONG_SCOPE_COMMENT (line: 1, col: 15): Comment is invalid in this scope - WRONG_SCOPE_COMMENT (line: 5, col: 14): Comment is invalid in this scope - WRONG_SCOPE_COMMENT (line: 5, col: 14): Comment is invalid in this scope - WRONG_SCOPE_COMMENT (line: 5, col: 63): Comment is invalid in this scope - WRONG_SCOPE_COMMENT (line: 5, col: 63): Comment is invalid in this scope - WRONG_SCOPE_COMMENT (line: 5, col: 63): Comment is invalid in this scope - WRONG_SCOPE_COMMENT (line: 6, col: 5): Comment is invalid in this scope - WRONG_SCOPE_COMMENT (line: 12, col: 36): Comment is invalid in this scope - WRONG_SCOPE_COMMENT (line: 18, col: 17): Comment is invalid in this scope - WRONG_SCOPE_COMMENT (line: 18, col: 17): Comment is invalid in this scope +test_file_210128.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: WRONG_SCOPE_COMMENT (line: 5, col: 14): Comment is invalid in this scope +Error: WRONG_SCOPE_COMMENT (line: 5, col: 14): Comment is invalid in this scope +Error: WRONG_SCOPE_COMMENT (line: 5, col: 63): Comment is invalid in this scope +Error: WRONG_SCOPE_COMMENT (line: 5, col: 63): Comment is invalid in this scope +Error: WRONG_SCOPE_COMMENT (line: 5, col: 63): Comment is invalid in this scope +Error: WRONG_SCOPE_COMMENT (line: 6, col: 5): Comment is invalid in this scope +Error: WRONG_SCOPE_COMMENT (line: 12, col: 36): Comment is invalid in this scope +Error: WRONG_SCOPE_COMMENT (line: 18, col: 17): Comment is invalid in this scope +Error: WRONG_SCOPE_COMMENT (line: 18, col: 17): Comment is invalid in this scope diff --git a/norminette/tests/rules/test_file_210128_2.c b/tests/rules/samples/test_file_210128_2.c similarity index 100% rename from norminette/tests/rules/test_file_210128_2.c rename to tests/rules/samples/test_file_210128_2.c diff --git a/norminette/tests/rules/test_file_210128_2.out b/tests/rules/samples/test_file_210128_2.out similarity index 58% rename from norminette/tests/rules/test_file_210128_2.out rename to tests/rules/samples/test_file_210128_2.out index 05b8e857..84583512 100644 --- a/norminette/tests/rules/test_file_210128_2.out +++ b/tests/rules/samples/test_file_210128_2.out @@ -4,7 +4,8 @@ test_file_210128_2.c - IsBlockEnd In "Function" from "GlobalScope" line 1": -test_file_210128_2.c: KO! - BRACE_NEWLINE (line: 1, col: 48): Expected newline before brace - BRACE_SHOULD_EOL (line: 1, col: 48): Expected newline after brace - TOO_MANY_INSTR (line: 1, col: 49): Too many instructions on a single line +test_file_210128_2.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: BRACE_NEWLINE (line: 1, col: 48): Expected newline before brace +Error: BRACE_SHOULD_EOL (line: 1, col: 48): Expected newline after brace +Error: TOO_MANY_INSTR (line: 1, col: 49): Too many instructions on a single line diff --git a/norminette/tests/rules/test_file_210131.c b/tests/rules/samples/test_file_210131.c similarity index 100% rename from norminette/tests/rules/test_file_210131.c rename to tests/rules/samples/test_file_210131.c diff --git a/norminette/tests/rules/test_file_210131.out b/tests/rules/samples/test_file_210131.out similarity index 92% rename from norminette/tests/rules/test_file_210131.out rename to tests/rules/samples/test_file_210131.out index 8692dbd5..8caedd9b 100644 --- a/norminette/tests/rules/test_file_210131.out +++ b/tests/rules/samples/test_file_210131.out @@ -12,4 +12,5 @@ test_file_210131.c - IsBlockEnd In "Function" from "GlobalScope" line 8": -test_file_210131.c: OK! +test_file_210131.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/test_file_210201.c b/tests/rules/samples/test_file_210201.c similarity index 100% rename from norminette/tests/rules/test_file_210201.c rename to tests/rules/samples/test_file_210201.c diff --git a/norminette/tests/rules/test_file_210201.out b/tests/rules/samples/test_file_210201.out similarity index 84% rename from norminette/tests/rules/test_file_210201.out rename to tests/rules/samples/test_file_210201.out index a4b78de5..d9b141b3 100644 --- a/norminette/tests/rules/test_file_210201.out +++ b/tests/rules/samples/test_file_210201.out @@ -24,9 +24,10 @@ test_file_210201.c - IsBlockEnd In "Function" from "GlobalScope" line 13": -test_file_210201.c: KO! 
- RETURN_PARENTHESIS (line: 3, col: 20): Return value must be in parenthesis - RETURN_PARENTHESIS (line: 4, col: 17): Return value must be in parenthesis - RETURN_PARENTHESIS (line: 5, col: 18): Return value must be in parenthesis - RETURN_PARENTHESIS (line: 6, col: 20): Return value must be in parenthesis - RETURN_PARENTHESIS (line: 7, col: 17): Return value must be in parenthesis +test_file_210201.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: RETURN_PARENTHESIS (line: 3, col: 20): Return value must be in parenthesis +Error: RETURN_PARENTHESIS (line: 4, col: 17): Return value must be in parenthesis +Error: RETURN_PARENTHESIS (line: 5, col: 18): Return value must be in parenthesis +Error: RETURN_PARENTHESIS (line: 6, col: 20): Return value must be in parenthesis +Error: RETURN_PARENTHESIS (line: 7, col: 17): Return value must be in parenthesis diff --git a/norminette/tests/rules/test_file_210201_2.c b/tests/rules/samples/test_file_210201_2.c similarity index 92% rename from norminette/tests/rules/test_file_210201_2.c rename to tests/rules/samples/test_file_210201_2.c index 1caaa67c..8e4ece71 100644 --- a/norminette/tests/rules/test_file_210201_2.c +++ b/tests/rules/samples/test_file_210201_2.c @@ -13,7 +13,7 @@ void test(void) int x; int (*f)(const t_module * m, char ***p_options - , int *has_options); + , int *has_options); (void)i; (void)f; } diff --git a/norminette/tests/rules/test_file_210201_2.out b/tests/rules/samples/test_file_210201_2.out similarity index 86% rename from norminette/tests/rules/test_file_210201_2.out rename to tests/rules/samples/test_file_210201_2.out index 845344e6..80965869 100644 --- a/norminette/tests/rules/test_file_210201_2.out +++ b/tests/rules/samples/test_file_210201_2.out @@ -28,7 +28,7 @@ test_file_210201_2.c - IsVarDeclaration In "Function" from "GlobalScope" line 15": - + test_file_210201_2.c - IsExpressionStatement In "Function" from "GlobalScope" line 17": test_file_210201_2.c - IsExpressionStatement In "Function" from "GlobalScope" line 18": @@ -47,7 +47,7 @@ test_file_210201_2.c - IsEmptyLine In "Function" from "GlobalScope" line 25": -test_file_210201_2.c - IsDeclaration In "Function" from "GlobalScope" line 26": +test_file_210201_2.c - IsVarDeclaration In "Function" from "GlobalScope" line 26": test_file_210201_2.c - IsExpressionStatement In "Function" from "GlobalScope" line 27": @@ -55,6 +55,10 @@ test_file_210201_2.c - IsBlockEnd In "Function" from "GlobalScope" line 29": -test_file_210201_2.c: KO! - VAR_DECL_START_FUNC (line: 15, col: 1): Variable declaration not at start of function - SPC_AFTER_POINTER (line: 15, col: 29): space after pointer +test_file_210201_2.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: VAR_DECL_START_FUNC (line: 15, col: 1): Variable declaration not at start of function +Error: SPC_AFTER_POINTER (line: 15, col: 29): space after pointer +Error: COMMA_START_LINE (line: 15, col: 51): Comma at line start +Error: VAR_DECL_START_FUNC (line: 26, col: 1): Variable declaration not at start of function +Error: SPACE_REPLACE_TAB (line: 26, col: 8): Found space when expecting tab diff --git a/norminette/tests/rules/test_file_210205.c b/tests/rules/samples/test_file_210205.c similarity index 100% rename from norminette/tests/rules/test_file_210205.c rename to tests/rules/samples/test_file_210205.c diff --git a/norminette/tests/rules/test_file_210205.out b/tests/rules/samples/test_file_210205.out similarity index 92% rename from norminette/tests/rules/test_file_210205.out rename to tests/rules/samples/test_file_210205.out index 5a8b4ad6..fb46a985 100644 --- a/norminette/tests/rules/test_file_210205.out +++ b/tests/rules/samples/test_file_210205.out @@ -10,4 +10,5 @@ test_file_210205.c - IsBlockEnd In "Function" from "GlobalScope" line 6": -test_file_210205.c: OK! +test_file_210205.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/test_file_210218.c b/tests/rules/samples/test_file_210218.c similarity index 100% rename from norminette/tests/rules/test_file_210218.c rename to tests/rules/samples/test_file_210218.c diff --git a/norminette/tests/rules/test_file_210218.out b/tests/rules/samples/test_file_210218.out similarity index 86% rename from norminette/tests/rules/test_file_210218.out rename to tests/rules/samples/test_file_210218.out index af66757f..cb0dea9f 100644 --- a/norminette/tests/rules/test_file_210218.out +++ b/tests/rules/samples/test_file_210218.out @@ -6,4 +6,5 @@ test_file_210218.c - IsBlockEnd In "Function" from "GlobalScope" line 4": -test_file_210218.c: OK! +test_file_210218.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/test_file_210218_2.c b/tests/rules/samples/test_file_210218_2.c similarity index 100% rename from norminette/tests/rules/test_file_210218_2.c rename to tests/rules/samples/test_file_210218_2.c diff --git a/norminette/tests/rules/test_file_210218_2.out b/tests/rules/samples/test_file_210218_2.out similarity index 76% rename from norminette/tests/rules/test_file_210218_2.out rename to tests/rules/samples/test_file_210218_2.out index a915afa9..c26880ba 100644 --- a/norminette/tests/rules/test_file_210218_2.out +++ b/tests/rules/samples/test_file_210218_2.out @@ -1,7 +1,7 @@ test_file_210218_2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": - + test_file_210218_2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": - + test_file_210218_2.c - IsEmptyLine In "GlobalScope" from "None" line 3": test_file_210218_2.c - IsFuncPrototype In "GlobalScope" from "None" line 4": @@ -12,5 +12,6 @@ test_file_210218_2.c - IsEmptyLine In "GlobalScope" from "None" line 8": test_file_210218_2.c - IsPreprocessorStatement In "GlobalScope" from "None" line 9": - -test_file_210218_2.c: OK! + +test_file_210218_2.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/norminette/tests/rules/test_file_210218_3.c b/tests/rules/samples/test_file_210218_3.c similarity index 100% rename from norminette/tests/rules/test_file_210218_3.c rename to tests/rules/samples/test_file_210218_3.c diff --git a/norminette/tests/rules/test_file_210218_3.out b/tests/rules/samples/test_file_210218_3.out similarity index 55% rename from norminette/tests/rules/test_file_210218_3.out rename to tests/rules/samples/test_file_210218_3.out index 8ac13453..747d6e36 100644 --- a/norminette/tests/rules/test_file_210218_3.out +++ b/tests/rules/samples/test_file_210218_3.out @@ -1,5 +1,5 @@ test_file_210218_3.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": - + test_file_210218_3.c - IsUserDefinedType In "GlobalScope" from "None" line 2": test_file_210218_3.c - IsUserDefinedType In "GlobalScope" from "None" line 3": @@ -8,4 +8,10 @@ test_file_210218_3.c - IsUserDefinedType In "GlobalScope" from "None" line 5": -test_file_210218_3.c: OK! +test_file_210218_3.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: FORBIDDEN_TYPEDEF (line: 2, col: 1): Typedef declaration are not allowed in .c files +Error: NL_AFTER_PREPROC (line: 2, col: 1): Preprocessor statement must be followed by a newline +Error: FORBIDDEN_TYPEDEF (line: 3, col: 1): Typedef declaration are not allowed in .c files +Error: FORBIDDEN_CHAR_NAME (line: 3, col: 24): user defined identifiers should contain only lowercase characters, digits or '_' +Error: FORBIDDEN_TYPEDEF (line: 5, col: 1): Typedef declaration are not allowed in .c files diff --git a/norminette/tests/rules/test_file_210223.c b/tests/rules/samples/test_file_210223.c similarity index 100% rename from norminette/tests/rules/test_file_210223.c rename to tests/rules/samples/test_file_210223.c diff --git a/norminette/tests/rules/test_file_210223.out b/tests/rules/samples/test_file_210223.out similarity index 78% rename from norminette/tests/rules/test_file_210223.out rename to tests/rules/samples/test_file_210223.out index db0d1822..a93c258a 100644 --- a/norminette/tests/rules/test_file_210223.out +++ b/tests/rules/samples/test_file_210223.out @@ -3,12 +3,11 @@ test_file_210223.c - IsBlockStart In "Function" from "GlobalScope" line 2": test_file_210223.c - IsControlStatement In "Function" from "GlobalScope" line 3": - + test_file_210223.c - IsExpressionStatement In "ControlStructure" from "Function" line 5": test_file_210223.c - IsFunctionCall In "Function" from "GlobalScope" line 6": - + test_file_210223.c - IsBlockEnd In "Function" from "GlobalScope" line 8": test_file_210223.c - IsEmptyLine In "GlobalScope" from "None" line 9": @@ -23,4 +22,7 @@ test_file_210223.c - IsBlockEnd In "Function" from "GlobalScope" line 14": -test_file_210223.c: OK! +test_file_210223.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: MIXED_SPACE_TAB (line: 3, col: 61): Mixed spaces and tabs +Error: TAB_INSTEAD_SPC (line: 4, col: 1): Found tab when expecting space diff --git a/norminette/tests/rules/test_file_210304.c b/tests/rules/samples/test_file_210304.c similarity index 100% rename from norminette/tests/rules/test_file_210304.c rename to tests/rules/samples/test_file_210304.c diff --git a/norminette/tests/rules/test_file_210304.out b/tests/rules/samples/test_file_210304.out similarity index 94% rename from norminette/tests/rules/test_file_210304.out rename to tests/rules/samples/test_file_210304.out index d73063eb..4cab1f03 100644 --- a/norminette/tests/rules/test_file_210304.out +++ b/tests/rules/samples/test_file_210304.out @@ -55,4 +55,8 @@ test_file_210304.c - IsBlockEnd In "Function" from "GlobalScope" line 31": -test_file_210304.c: OK! +test_file_210304.c: Error! +Error: FORBIDDEN_TYPEDEF (line: 1, col: 1): Typedef declaration are not allowed in .c files +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: FORBIDDEN_TYPEDEF (line: 6, col: 1): Typedef declaration are not allowed in .c files +Error: FORBIDDEN_TYPEDEF (line: 14, col: 1): Typedef declaration are not allowed in .c files diff --git a/tests/rules/samples/test_file_210308.c b/tests/rules/samples/test_file_210308.c new file mode 100644 index 00000000..a716307d --- /dev/null +++ b/tests/rules/samples/test_file_210308.c @@ -0,0 +1,35 @@ +void test1(void) +{ +} +/** + * test + */ + +void test2(void) +{ +} + +/** + * test + */ + +void test2(void) +{ +} + +/** + * test + */ +void test2(void) +{ +} +/** + * test + */ +void test2(void) +{ +} +#define TOTO 2 +void test2(void) +{ +} diff --git a/tests/rules/samples/test_file_210308.out b/tests/rules/samples/test_file_210308.out new file mode 100644 index 00000000..5f3ab185 --- /dev/null +++ b/tests/rules/samples/test_file_210308.out @@ -0,0 +1,68 @@ +test_file_210308.c - IsFuncDeclaration In "GlobalScope" from "None" line 1": + +test_file_210308.c - IsBlockStart In "Function" from "GlobalScope" line 2": + +test_file_210308.c - IsBlockEnd In "Function" from "GlobalScope" line 3": + +test_file_210308.c - IsComment In "GlobalScope" from "None" line 4": + +test_file_210308.c - IsEmptyLine In "GlobalScope" from "None" line 7": + +test_file_210308.c - IsFuncDeclaration In "GlobalScope" from "None" line 8": + +test_file_210308.c - IsBlockStart In "Function" from "GlobalScope" line 9": + +test_file_210308.c - IsBlockEnd In "Function" from "GlobalScope" line 10": + +test_file_210308.c - IsEmptyLine In "GlobalScope" from "None" line 11": + +test_file_210308.c - IsComment In "GlobalScope" from "None" line 12": + +test_file_210308.c - IsEmptyLine In "GlobalScope" from "None" line 15": + +test_file_210308.c - IsFuncDeclaration In "GlobalScope" from "None" line 16": + +test_file_210308.c - IsBlockStart In "Function" from "GlobalScope" line 17": + +test_file_210308.c - IsBlockEnd In "Function" from "GlobalScope" line 18": + +test_file_210308.c - IsEmptyLine In "GlobalScope" from "None" line 19": + +test_file_210308.c - IsComment In "GlobalScope" from "None" line 20": + +test_file_210308.c - IsFuncDeclaration In "GlobalScope" from "None" line 23": + +test_file_210308.c - IsBlockStart In "Function" from "GlobalScope" line 24": + +test_file_210308.c - IsBlockEnd In "Function" from "GlobalScope" line 25": + +test_file_210308.c - IsComment In "GlobalScope" from "None" line 26": + +test_file_210308.c - IsFuncDeclaration In 
"GlobalScope" from "None" line 29": + +test_file_210308.c - IsBlockStart In "Function" from "GlobalScope" line 30": + +test_file_210308.c - IsBlockEnd In "Function" from "GlobalScope" line 31": + +test_file_210308.c - IsPreprocessorStatement In "GlobalScope" from "None" line 32": + +test_file_210308.c - IsFuncDeclaration In "GlobalScope" from "None" line 33": + +test_file_210308.c - IsBlockStart In "Function" from "GlobalScope" line 34": + +test_file_210308.c - IsBlockEnd In "Function" from "GlobalScope" line 35": + +test_file_210308.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: NEWLINE_PRECEDES_FUNC (line: 29, col: 1): Functions must be separated by a newline +Error: NEWLINE_PRECEDES_FUNC (line: 33, col: 1): Functions must be separated by a newline +Error: NL_AFTER_PREPROC (line: 33, col: 1): Preprocessor statement must be followed by a newline +Error: TOO_MANY_FUNCS (line: 33, col: 1): Too many functions in file diff --git a/tests/rules/samples/test_file_210308_2.c b/tests/rules/samples/test_file_210308_2.c new file mode 100644 index 00000000..d2890842 --- /dev/null +++ b/tests/rules/samples/test_file_210308_2.c @@ -0,0 +1,5 @@ +int main(void) +{ + if (!((*array)[i] = test((test + 1)))) + return (NULL); +} diff --git a/tests/rules/samples/test_file_210308_2.out b/tests/rules/samples/test_file_210308_2.out new file mode 100644 index 00000000..cb511971 --- /dev/null +++ b/tests/rules/samples/test_file_210308_2.out @@ -0,0 +1,13 @@ +test_file_210308_2.c - IsFuncDeclaration In "GlobalScope" from "None" line 1": + +test_file_210308_2.c - IsBlockStart In "Function" from "GlobalScope" line 2": + +test_file_210308_2.c - IsControlStatement In "Function" from "GlobalScope" line 3": + +test_file_210308_2.c - IsExpressionStatement In "ControlStructure" from "Function" line 4": + +test_file_210308_2.c - IsBlockEnd In "Function" from "GlobalScope" line 5": + +test_file_210308_2.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: ASSIGN_IN_CONTROL (line: 3, col: 23): Assignment in control structure diff --git a/tests/rules/samples/test_file_210308_3.c b/tests/rules/samples/test_file_210308_3.c new file mode 100644 index 00000000..9ffb1f8a --- /dev/null +++ b/tests/rules/samples/test_file_210308_3.c @@ -0,0 +1,8 @@ +#ifndef BUFFER_SIZE +# warning no BUFFER_SIZE specified, defaulting to 32 +# define BUFFER_SIZE 32 +#elif BUFFER_SIZE <= 0 +# warning BUFFER_SIZE <= 0, defaulting to 32 +# undef BUFFER_SIZE +# define BUFFER_SIZE 32 +#endif \ No newline at end of file diff --git a/tests/rules/samples/test_file_210308_3.out b/tests/rules/samples/test_file_210308_3.out new file mode 100644 index 00000000..013e7be2 --- /dev/null +++ b/tests/rules/samples/test_file_210308_3.out @@ -0,0 +1,18 @@ +test_file_210308_3.c - IsPreprocessorStatement In "GlobalScope" from "None" line 1": + +test_file_210308_3.c - IsPreprocessorStatement In "GlobalScope" from "None" line 2": + +test_file_210308_3.c - IsPreprocessorStatement In "GlobalScope" from "None" line 3": + +test_file_210308_3.c - IsPreprocessorStatement In "GlobalScope" from "None" line 4": + +test_file_210308_3.c - IsPreprocessorStatement In "GlobalScope" from "None" line 5": + +test_file_210308_3.c - IsPreprocessorStatement In "GlobalScope" from "None" line 6": + +test_file_210308_3.c - IsPreprocessorStatement In "GlobalScope" from "None" line 7": + +test_file_210308_3.c - IsPreprocessorStatement In "GlobalScope" from "None" line 8": + +test_file_210308_3.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/tests/rules/samples/test_file_210316.c b/tests/rules/samples/test_file_210316.c new file mode 100644 index 00000000..a11ee305 --- /dev/null +++ b/tests/rules/samples/test_file_210316.c @@ -0,0 +1,38 @@ +int main(void) +{ + if (((unsigned char*)s1)[curr] != ((unsigned char*)s2)[curr]) + return (((unsigned char*)s1)[curr] - ((unsigned char*)s2)[curr]); +} + +struct my_struct g_struct = { + .field = (void *)&value +}; + +int main(void) +{ + t_quaternion g_quat = { + .r = 1, + .i = 0, + .j = 0, + .k = 0 + }; +} +t_quaternion g_quat = { + .r = 1, + .i = 0, + .j = 0, + .k = 0 +}; +*result = (t_quaternion) +{ + .r = 1, + .i = 0, + .j = 0, + .k = 0 +}; + +static const t_quaternion g_quat = {.r = 1, .i = 0, .j = 0, .k = 0}; +static const t_quaternion g_quat = {r : 1, i : 0, j: 0, k : 0 }; +static const t_quaternion g_quats[4] = {[0].r = 3, [1].i = 5 }; +*result = (t_quaternion){1, 0, 0, 0}; +*result = (t_quaternion){r : 1, i : 0, j : 0, k : 0 }; \ No newline at end of file diff --git a/tests/rules/samples/test_file_210316.out b/tests/rules/samples/test_file_210316.out new file mode 100644 index 00000000..f4aa50a7 --- /dev/null +++ b/tests/rules/samples/test_file_210316.out @@ -0,0 +1,77 @@ +test_file_210316.c - IsFuncDeclaration In "GlobalScope" from "None" line 1": + +test_file_210316.c - IsBlockStart In "Function" from "GlobalScope" line 2": + +test_file_210316.c - IsControlStatement In "Function" from "GlobalScope" line 3": + +test_file_210316.c - IsExpressionStatement In "ControlStructure" from "Function" line 4": + +test_file_210316.c - IsBlockEnd In "Function" from "GlobalScope" line 5": + +test_file_210316.c - IsEmptyLine In "GlobalScope" from "None" line 6": + +test_file_210316.c - IsVarDeclaration In "GlobalScope" from "None" line 7": + + + +test_file_210316.c - IsEmptyLine In "GlobalScope" from "None" line 10": + +test_file_210316.c - IsFuncDeclaration In "GlobalScope" from "None" line 11": + +test_file_210316.c - IsBlockStart In "Function" from "GlobalScope" line 12": + +test_file_210316.c - IsVarDeclaration In "Function" from "GlobalScope" line 13": + + + + + + +test_file_210316.c - IsBlockEnd In "Function" from "GlobalScope" line 19": + +test_file_210316.c - IsVarDeclaration In "GlobalScope" from "None" line 20": + + + + + + +test_file_210316.c - IsAssignation In "GlobalScope" from "None" line 26": + + + + + + + +test_file_210316.c - IsEmptyLine In "GlobalScope" from "None" line 33": + +test_file_210316.c - IsVarDeclaration In "GlobalScope" from "None" line 34": + +test_file_210316.c - IsVarDeclaration In "GlobalScope" from "None" line 35": + +test_file_210316.c - IsVarDeclaration In "GlobalScope" from "None" line 36": + +test_file_210316.c - IsAssignation In "GlobalScope" from "None" line 37": + +test_file_210316.c - IsAssignation In "GlobalScope" from "None" line 38": + +test_file_210316.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: SPACE_AFTER_KW (line: 3, col: 20): Missing space after keyword +Error: SPACE_AFTER_KW (line: 3, col: 50): Missing space after keyword +Error: SPACE_AFTER_KW (line: 4, col: 28): Missing space after keyword +Error: SPACE_AFTER_KW (line: 4, col: 57): Missing space after keyword +Notice: GLOBAL_VAR_DETECTED (line: 7, col: 1): Global variable present in file. Make sure it is a reasonable choice. 
+Error: DECL_ASSIGN_LINE (line: 13, col: 28): Declaration and assignation on a single line +Error: DECL_ASSIGN_LINE (line: 14, col: 12): Declaration and assignation on a single line +Error: DECL_ASSIGN_LINE (line: 15, col: 12): Declaration and assignation on a single line +Error: DECL_ASSIGN_LINE (line: 16, col: 12): Declaration and assignation on a single line +Error: DECL_ASSIGN_LINE (line: 17, col: 12): Declaration and assignation on a single line +Notice: GLOBAL_VAR_DETECTED (line: 20, col: 1): Global variable present in file. Make sure it is a reasonable choice. +Notice: GLOBAL_VAR_DETECTED (line: 34, col: 1): Global variable present in file. Make sure it is a reasonable choice. +Notice: GLOBAL_VAR_DETECTED (line: 35, col: 1): Global variable present in file. Make sure it is a reasonable choice. +Error: NO_SPC_BFR_PAR (line: 35, col: 65): Extra space before parenthesis (brace/bracket) +Notice: GLOBAL_VAR_DETECTED (line: 36, col: 1): Global variable present in file. Make sure it is a reasonable choice. +Error: NO_SPC_BFR_PAR (line: 36, col: 64): Extra space before parenthesis (brace/bracket) +Error: NO_SPC_BFR_PAR (line: 38, col: 53): Extra space before parenthesis (brace/bracket) diff --git a/tests/rules/samples/test_file_210322.c b/tests/rules/samples/test_file_210322.c new file mode 100644 index 00000000..6406dc19 --- /dev/null +++ b/tests/rules/samples/test_file_210322.c @@ -0,0 +1,12 @@ +t_test_struct test(void) +{ + static const t_test_struct s = ((t_test_struct) + { + .value = 42 + }); + + return ((t_test_struct) + ( + .value = 42 + )); +} diff --git a/tests/rules/samples/test_file_210322.out b/tests/rules/samples/test_file_210322.out new file mode 100644 index 00000000..93a6075a --- /dev/null +++ b/tests/rules/samples/test_file_210322.out @@ -0,0 +1,20 @@ +test_file_210322.c - IsFuncDeclaration In "GlobalScope" from "None" line 1": + +test_file_210322.c - IsBlockStart In "Function" from "GlobalScope" line 2": + +test_file_210322.c - IsVarDeclaration In "Function" from "GlobalScope" line 3": + + + + +test_file_210322.c - IsEmptyLine In "Function" from "GlobalScope" line 7": + +test_file_210322.c - IsExpressionStatement In "Function" from "GlobalScope" line 8": + + + + +test_file_210322.c - IsBlockEnd In "Function" from "GlobalScope" line 12": + +test_file_210322.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/tests/rules/samples/test_file_210412.c b/tests/rules/samples/test_file_210412.c new file mode 100644 index 00000000..7292e4ea --- /dev/null +++ b/tests/rules/samples/test_file_210412.c @@ -0,0 +1,21 @@ +static t_crd get_first_basis(t_crd *normal) +{ + (pvec_module(normal) * pvec_module(&basis_vec)); +} + +int v(int *restrict t) +{ + const char *restrict s = "Hello World"; + + printf("%s\n", s); + return (0); +} + +int error_std( + t_shell_context *context, + int return_code, + ... 
+) +{ + return ; +} \ No newline at end of file diff --git a/tests/rules/samples/test_file_210412.out b/tests/rules/samples/test_file_210412.out new file mode 100644 index 00000000..f060c7bf --- /dev/null +++ b/tests/rules/samples/test_file_210412.out @@ -0,0 +1,42 @@ +test_file_210412.c - IsFuncDeclaration In "GlobalScope" from "None" line 1": + +test_file_210412.c - IsBlockStart In "Function" from "GlobalScope" line 2": + +test_file_210412.c - IsDeclaration In "Function" from "GlobalScope" line 3": + +test_file_210412.c - IsBlockEnd In "Function" from "GlobalScope" line 4": + +test_file_210412.c - IsEmptyLine In "GlobalScope" from "None" line 5": + +test_file_210412.c - IsFuncDeclaration In "GlobalScope" from "None" line 6": + +test_file_210412.c - IsBlockStart In "Function" from "GlobalScope" line 7": + +test_file_210412.c - IsVarDeclaration In "Function" from "GlobalScope" line 8": + +test_file_210412.c - IsEmptyLine In "Function" from "GlobalScope" line 9": + +test_file_210412.c - IsFunctionCall In "Function" from "GlobalScope" line 10": + +test_file_210412.c - IsExpressionStatement In "Function" from "GlobalScope" line 11": + +test_file_210412.c - IsBlockEnd In "Function" from "GlobalScope" line 12": + +test_file_210412.c - IsEmptyLine In "GlobalScope" from "None" line 13": + +test_file_210412.c - IsFuncDeclaration In "GlobalScope" from "None" line 14": + + + + + +test_file_210412.c - IsBlockStart In "Function" from "GlobalScope" line 19": + +test_file_210412.c - IsExpressionStatement In "Function" from "GlobalScope" line 20": + +test_file_210412.c - IsBlockEnd In "Function" from "GlobalScope" line 21": + +test_file_210412.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: SPACE_BEFORE_FUNC (line: 14, col: 4): Found space when expecting tab before function name +Error: BRACE_SHOULD_EOL (line: 21, col: 1): Expected newline after brace diff --git a/tests/rules/samples/test_file_210923.c b/tests/rules/samples/test_file_210923.c new file mode 100644 index 00000000..7666ec25 --- /dev/null +++ b/tests/rules/samples/test_file_210923.c @@ -0,0 +1,107 @@ +int main(void) +{ + int a; + int b; + + (a = 4) && (b = 6); +} + +#include + +int main(void) +{ + int a; + + (a = 0, printf("%d\n", a)); + return (0); +} + +int v(int *restrict t) +{ + const char *restrict s = "Hello World"; + + printf("%s\n", s), a = 15; + return (0); +} + +int main(void) +{ + int a; + + a = 0, printf("%d\n", a); + return (0); +} + +int main(void) +{ + a = (FLOAT)-0.5f; + a = (FLOAT)+0.5f; + a = (FLOAT)0.5f; + a = (float)-0.5f; + a = (float)+0.5f; + a = (float)0.5f; + a = (FLOAT)a; + a = (float)a; +} + +__attribute__((warn_unused_result)) int main(int argc, char **argv) +{ + printf("Hello, world!\n"); +} + +void *xmalloc(size_t size) __attribute__((malloc)) __attribute__((warn_unused_result)); + +int main(void) +{ + a = ({4;}); +} + +void draw_player(t_env *env) +{ + env->p->f_x += (env->p->x - env->p->f_x) * 0.5; + env->p->f_y += (env->p->y - env->p->f_y) * 0.5; + draw_on_image(env->main_img, env->p->img, ((int)(env->p->f_y) * 64), ((int)(env->p->f_x) * 64)); +} + +void draw_player(t_env *env) +{ + env->p->f_x += (env->p->x - env->p->f_x) * 0.5; + env->p->f_y += (env->p->y - env->p->f_y) * 0.5; + draw_on_image(env->main_img, env->p->img, ((int)(env->p->f_y) * 64), ((int)(env->p->f_x) * 64)); +} + +int main(void) +{ + t_int * restrict a = NULL; + t_int * restrict b = 1; + t_int * restrict c = 1; + t_int * restrict d = 1; + t_int * restrict e = 1; + t_int * restrict f = 1; +} + +int 
(*open_pipe(int nb_of_cmd))[2] +{ + int i; + t_pipe pipe_fd; + + pipe_fd = malloc(sizeof(int [2]) * (nb_of_cmd - 1)); + if (error_catch(pipe_fd == 0, "system", "fail to malloc pipe table")) + return (NULL); + i = 0; + while (i < nb_of_cmd - 1) + { + if (error_catch(pipe(pipe_fd[i++]) == -1, "system", + "fail to open pipe")) + { + while (--i) + { + close(pipe_fd[i][0]); + close(pipe_fd[i][1]); + } + free(pipe_fd); + return (NULL); + } + } + return (pipe_fd); +} diff --git a/tests/rules/samples/test_file_210923.out b/tests/rules/samples/test_file_210923.out new file mode 100644 index 00000000..623931f7 --- /dev/null +++ b/tests/rules/samples/test_file_210923.out @@ -0,0 +1,257 @@ +test_file_210923.c - IsFuncDeclaration In "GlobalScope" from "None" line 1": + +test_file_210923.c - IsBlockStart In "Function" from "GlobalScope" line 2": + +test_file_210923.c - IsVarDeclaration In "Function" from "GlobalScope" line 3": + +test_file_210923.c - IsVarDeclaration In "Function" from "GlobalScope" line 4": + +test_file_210923.c - IsEmptyLine In "Function" from "GlobalScope" line 5": + +test_file_210923.c - IsAssignation In "Function" from "GlobalScope" line 6": + +test_file_210923.c - IsBlockEnd In "Function" from "GlobalScope" line 7": + +test_file_210923.c - IsEmptyLine In "GlobalScope" from "None" line 8": + +test_file_210923.c - IsPreprocessorStatement In "GlobalScope" from "None" line 9": + +test_file_210923.c - IsEmptyLine In "GlobalScope" from "None" line 10": + +test_file_210923.c - IsFuncDeclaration In "GlobalScope" from "None" line 11": + +test_file_210923.c - IsBlockStart In "Function" from "GlobalScope" line 12": + +test_file_210923.c - IsVarDeclaration In "Function" from "GlobalScope" line 13": + +test_file_210923.c - IsEmptyLine In "Function" from "GlobalScope" line 14": + +test_file_210923.c - IsAssignation In "Function" from "GlobalScope" line 15": + +test_file_210923.c - IsFunctionCall In "Function" from "GlobalScope" line 15": + +test_file_210923.c - IsExpressionStatement In "Function" from "GlobalScope" line 16": + +test_file_210923.c - IsBlockEnd In "Function" from "GlobalScope" line 17": + +test_file_210923.c - IsEmptyLine In "GlobalScope" from "None" line 18": + +test_file_210923.c - IsFuncDeclaration In "GlobalScope" from "None" line 19": + +test_file_210923.c - IsBlockStart In "Function" from "GlobalScope" line 20": + +test_file_210923.c - IsVarDeclaration In "Function" from "GlobalScope" line 21": + +test_file_210923.c - IsEmptyLine In "Function" from "GlobalScope" line 22": + +test_file_210923.c - IsFunctionCall In "Function" from "GlobalScope" line 23": + +test_file_210923.c - IsAssignation In "Function" from "GlobalScope" line 23": + +test_file_210923.c - IsExpressionStatement In "Function" from "GlobalScope" line 24": + +test_file_210923.c - IsBlockEnd In "Function" from "GlobalScope" line 25": + +test_file_210923.c - IsEmptyLine In "GlobalScope" from "None" line 26": + +test_file_210923.c - IsFuncDeclaration In "GlobalScope" from "None" line 27": + +test_file_210923.c - IsBlockStart In "Function" from "GlobalScope" line 28": + +test_file_210923.c - IsVarDeclaration In "Function" from "GlobalScope" line 29": + +test_file_210923.c - IsEmptyLine In "Function" from "GlobalScope" line 30": + +test_file_210923.c - IsAssignation In "Function" from "GlobalScope" line 31": + +test_file_210923.c - IsFunctionCall In "Function" from "GlobalScope" line 31": + +test_file_210923.c - IsExpressionStatement In "Function" from "GlobalScope" line 32": + +test_file_210923.c - IsBlockEnd In 
"Function" from "GlobalScope" line 33": + +test_file_210923.c - IsEmptyLine In "GlobalScope" from "None" line 34": + +test_file_210923.c - IsFuncDeclaration In "GlobalScope" from "None" line 35": + +test_file_210923.c - IsBlockStart In "Function" from "GlobalScope" line 36": + +test_file_210923.c - IsAssignation In "Function" from "GlobalScope" line 37": + +test_file_210923.c - IsAssignation In "Function" from "GlobalScope" line 38": + +test_file_210923.c - IsAssignation In "Function" from "GlobalScope" line 39": + +test_file_210923.c - IsAssignation In "Function" from "GlobalScope" line 40": + +test_file_210923.c - IsAssignation In "Function" from "GlobalScope" line 41": + +test_file_210923.c - IsAssignation In "Function" from "GlobalScope" line 42": + +test_file_210923.c - IsAssignation In "Function" from "GlobalScope" line 43": + +test_file_210923.c - IsAssignation In "Function" from "GlobalScope" line 44": + +test_file_210923.c - IsBlockEnd In "Function" from "GlobalScope" line 45": + +test_file_210923.c - IsEmptyLine In "GlobalScope" from "None" line 46": + +test_file_210923.c - IsFuncDeclaration In "GlobalScope" from "None" line 47": + +test_file_210923.c - IsBlockStart In "Function" from "GlobalScope" line 48": + +test_file_210923.c - IsFunctionCall In "Function" from "GlobalScope" line 49": + +test_file_210923.c - IsBlockEnd In "Function" from "GlobalScope" line 50": + +test_file_210923.c - IsEmptyLine In "GlobalScope" from "None" line 51": + +test_file_210923.c - IsFuncPrototype In "GlobalScope" from "None" line 52": + +test_file_210923.c - IsEmptyLine In "GlobalScope" from "None" line 53": + +test_file_210923.c - IsFuncDeclaration In "GlobalScope" from "None" line 54": + +test_file_210923.c - IsBlockStart In "Function" from "GlobalScope" line 55": + +test_file_210923.c - IsAssignation In "Function" from "GlobalScope" line 56": + +test_file_210923.c - IsBlockEnd In "Function" from "GlobalScope" line 57": + +test_file_210923.c - IsEmptyLine In "GlobalScope" from "None" line 58": + +test_file_210923.c - IsFuncDeclaration In "GlobalScope" from "None" line 59": + +test_file_210923.c - IsBlockStart In "Function" from "GlobalScope" line 60": + +test_file_210923.c - IsAssignation In "Function" from "GlobalScope" line 61": + +test_file_210923.c - IsAssignation In "Function" from "GlobalScope" line 62": + +test_file_210923.c - IsFunctionCall In "Function" from "GlobalScope" line 63": + +test_file_210923.c - IsBlockEnd In "Function" from "GlobalScope" line 64": + +test_file_210923.c - IsEmptyLine In "GlobalScope" from "None" line 65": + +test_file_210923.c - IsFuncDeclaration In "GlobalScope" from "None" line 66": + +test_file_210923.c - IsBlockStart In "Function" from "GlobalScope" line 67": + +test_file_210923.c - IsAssignation In "Function" from "GlobalScope" line 68": + +test_file_210923.c - IsAssignation In "Function" from "GlobalScope" line 69": + +test_file_210923.c - IsFunctionCall In "Function" from "GlobalScope" line 70": + +test_file_210923.c - IsBlockEnd In "Function" from "GlobalScope" line 71": + +test_file_210923.c - IsEmptyLine In "GlobalScope" from "None" line 72": + +test_file_210923.c - IsFuncDeclaration In "GlobalScope" from "None" line 73": + +test_file_210923.c - IsBlockStart In "Function" from "GlobalScope" line 74": + +test_file_210923.c - IsVarDeclaration In "Function" from "GlobalScope" line 75": + +test_file_210923.c - IsVarDeclaration In "Function" from "GlobalScope" line 76": + +test_file_210923.c - IsVarDeclaration In "Function" from "GlobalScope" line 77": + 
+test_file_210923.c - IsVarDeclaration In "Function" from "GlobalScope" line 78": + +test_file_210923.c - IsVarDeclaration In "Function" from "GlobalScope" line 79": + +test_file_210923.c - IsVarDeclaration In "Function" from "GlobalScope" line 80": + +test_file_210923.c - IsBlockEnd In "Function" from "GlobalScope" line 81": + +test_file_210923.c - IsEmptyLine In "GlobalScope" from "None" line 82": + +test_file_210923.c - IsFuncDeclaration In "GlobalScope" from "None" line 83": + +test_file_210923.c - IsBlockStart In "Function" from "GlobalScope" line 84": + +test_file_210923.c - IsVarDeclaration In "Function" from "GlobalScope" line 85": + +test_file_210923.c - IsVarDeclaration In "Function" from "GlobalScope" line 86": + +test_file_210923.c - IsEmptyLine In "Function" from "GlobalScope" line 87": + +test_file_210923.c - IsAssignation In "Function" from "GlobalScope" line 88": + +test_file_210923.c - IsControlStatement In "Function" from "GlobalScope" line 89": + +test_file_210923.c - IsExpressionStatement In "ControlStructure" from "Function" line 90": + +test_file_210923.c - IsAssignation In "Function" from "GlobalScope" line 91": + +test_file_210923.c - IsControlStatement In "Function" from "GlobalScope" line 92": + +test_file_210923.c - IsBlockStart In "ControlStructure" from "Function" line 93": + +test_file_210923.c - IsControlStatement In "ControlStructure" from "Function" line 94": + + +test_file_210923.c - IsBlockStart In "ControlStructure" from "ControlStructure" line 96": + +test_file_210923.c - IsControlStatement In "ControlStructure" from "ControlStructure" line 97": + +test_file_210923.c - IsBlockStart In "ControlStructure" from "ControlStructure" line 98": + +test_file_210923.c - IsFunctionCall In "ControlStructure" from "ControlStructure" line 99": + +test_file_210923.c - IsFunctionCall In "ControlStructure" from "ControlStructure" line 100": + +test_file_210923.c - IsBlockEnd In "ControlStructure" from "ControlStructure" line 101": + +test_file_210923.c - IsFunctionCall In "ControlStructure" from "ControlStructure" line 102": + +test_file_210923.c - IsExpressionStatement In "ControlStructure" from "ControlStructure" line 103": + +test_file_210923.c - IsBlockEnd In "ControlStructure" from "ControlStructure" line 104": + +test_file_210923.c - IsBlockEnd In "ControlStructure" from "Function" line 105": + +test_file_210923.c - IsExpressionStatement In "Function" from "GlobalScope" line 106": + +test_file_210923.c - IsBlockEnd In "Function" from "GlobalScope" line 107": + +test_file_210923.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: MULT_ASSIGN_LINE (line: 6, col: 16): Multiple assignations on a single line +Error: INCLUDE_START_FILE (line: 9, col: 1): Include must be at the start of file +Error: TOO_FEW_TAB (line: 15, col: 13): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 15, col: 13): Too many instructions on a single line +Error: TOO_FEW_TAB (line: 23, col: 24): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 23, col: 24): Too many instructions on a single line +Error: TOO_FEW_TAB (line: 31, col: 12): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 31, col: 12): Too many instructions on a single line +Error: TOO_MANY_FUNCS (line: 47, col: 1): Too many functions in file +Error: LINE_TOO_LONG (line: 52, col: 88): line too long +Error: TOO_MANY_FUNCS (line: 54, col: 1): Too many functions in file +Error: TOO_MANY_FUNCS (line: 59, col: 1): Too many functions in file +Error: LINE_TOO_LONG (line: 63, col: 84): line too long +Error: TOO_MANY_FUNCS (line: 66, col: 1): Too many functions in file +Error: LINE_TOO_LONG (line: 70, col: 84): line too long +Error: TOO_MANY_FUNCS (line: 73, col: 1): Too many functions in file +Error: SPACE_REPLACE_TAB (line: 75, col: 10): Found space when expecting tab +Error: SPC_AFTER_POINTER (line: 75, col: 11): space after pointer +Error: DECL_ASSIGN_LINE (line: 75, col: 24): Declaration and assignation on a single line +Error: SPACE_REPLACE_TAB (line: 76, col: 10): Found space when expecting tab +Error: SPC_AFTER_POINTER (line: 76, col: 11): space after pointer +Error: DECL_ASSIGN_LINE (line: 76, col: 24): Declaration and assignation on a single line +Error: SPACE_REPLACE_TAB (line: 77, col: 10): Found space when expecting tab +Error: SPC_AFTER_POINTER (line: 77, col: 11): space after pointer +Error: DECL_ASSIGN_LINE (line: 77, col: 24): Declaration and assignation on a single line +Error: SPACE_REPLACE_TAB (line: 78, col: 10): Found space when expecting tab +Error: SPC_AFTER_POINTER (line: 78, col: 11): space after pointer +Error: DECL_ASSIGN_LINE (line: 78, col: 24): Declaration and assignation on a single line +Error: SPACE_REPLACE_TAB (line: 79, col: 10): Found space when expecting tab +Error: SPC_AFTER_POINTER (line: 79, col: 11): space after pointer +Error: DECL_ASSIGN_LINE (line: 79, col: 24): Declaration and assignation on a single line +Error: TOO_MANY_VARS_FUNC (line: 80, col: 1): Too many variables declarations in a function +Error: SPACE_REPLACE_TAB (line: 80, col: 10): Found space when expecting tab +Error: SPC_AFTER_POINTER (line: 80, col: 11): space after pointer +Error: DECL_ASSIGN_LINE (line: 80, col: 24): Declaration and assignation on a single line +Error: TOO_MANY_FUNCS (line: 83, col: 1): Too many functions in file diff --git a/tests/rules/samples/test_file_operators.c b/tests/rules/samples/test_file_operators.c new file mode 100644 index 00000000..193aa330 --- /dev/null +++ b/tests/rules/samples/test_file_operators.c @@ -0,0 +1,9 @@ +int main(void) +{ + int a; + unsigned int b[1]; + + a = -10; + *b = (unsigned int)-a; + return (0); +} diff --git a/tests/rules/samples/test_file_operators.out b/tests/rules/samples/test_file_operators.out new file mode 100644 index 00000000..a4fc2c1f --- /dev/null +++ b/tests/rules/samples/test_file_operators.out @@ -0,0 +1,20 @@ +test_file_operators.c - IsFuncDeclaration In "GlobalScope" from "None" line 1": + +test_file_operators.c - IsBlockStart In "Function" from "GlobalScope" line 2": + +test_file_operators.c - 
IsVarDeclaration In "Function" from "GlobalScope" line 3": + +test_file_operators.c - IsVarDeclaration In "Function" from "GlobalScope" line 4": + +test_file_operators.c - IsEmptyLine In "Function" from "GlobalScope" line 5": + +test_file_operators.c - IsAssignation In "Function" from "GlobalScope" line 6": + +test_file_operators.c - IsAssignation In "Function" from "GlobalScope" line 7": + +test_file_operators.c - IsExpressionStatement In "Function" from "GlobalScope" line 8": + +test_file_operators.c - IsBlockEnd In "Function" from "GlobalScope" line 9": + +test_file_operators.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/tests/rules/samples/test_islabel.c b/tests/rules/samples/test_islabel.c new file mode 100644 index 00000000..5a304aa6 --- /dev/null +++ b/tests/rules/samples/test_islabel.c @@ -0,0 +1,55 @@ +int main(void) +{ + dummy(4); + kikuti(); + return (0); +} + +void dummy(int n) +{ + if (n == 4) + goto _4; + if (n == 2) { + goto _2; + } + goto end; + _4: + { + write(1, "4", 1); + } + _2: + write(1, "2", 1); +end: + write(1, "\n", 1); +bla: return; +blo: printf( + "NiumXp :D") + ; +ble:( + printf( + "NiumXp :D" + ) + ) +;} + +void kikuti(void) +{ + int i; + + i = 0; +loop: + if (i < 10) + goto loop; + { + { + goto calcal; + calcal: + } + while (1) + { + goto calcal; + } + if (1) + goto calcal; + } +} diff --git a/tests/rules/samples/test_islabel.out b/tests/rules/samples/test_islabel.out new file mode 100644 index 00000000..ecd306e8 --- /dev/null +++ b/tests/rules/samples/test_islabel.out @@ -0,0 +1,151 @@ +test_islabel.c - IsFuncDeclaration In "GlobalScope" from "None" line 1": + +test_islabel.c - IsBlockStart In "Function" from "GlobalScope" line 2": + +test_islabel.c - IsFunctionCall In "Function" from "GlobalScope" line 3": + +test_islabel.c - IsFunctionCall In "Function" from "GlobalScope" line 4": + +test_islabel.c - IsExpressionStatement In "Function" from "GlobalScope" line 5": + +test_islabel.c - IsBlockEnd In "Function" from "GlobalScope" line 6": + +test_islabel.c - IsEmptyLine In "GlobalScope" from "None" line 7": + +test_islabel.c - IsFuncDeclaration In "GlobalScope" from "None" line 8": + +test_islabel.c - IsBlockStart In "Function" from "GlobalScope" line 9": + +test_islabel.c - IsControlStatement In "Function" from "GlobalScope" line 10": + +test_islabel.c - IsExpressionStatement In "ControlStructure" from "Function" line 11": + +test_islabel.c - IsControlStatement In "Function" from "GlobalScope" line 12": + +test_islabel.c - IsBlockStart In "ControlStructure" from "Function" line 12": + +test_islabel.c - IsExpressionStatement In "ControlStructure" from "Function" line 13": + +test_islabel.c - IsBlockEnd In "ControlStructure" from "Function" line 14": + +test_islabel.c - IsExpressionStatement In "Function" from "GlobalScope" line 15": + +test_islabel.c - IsLabel In "Function" from "GlobalScope" line 16": + +test_islabel.c - IsBlockStart In "Function" from "GlobalScope" line 17": + +test_islabel.c - IsFunctionCall In "ControlStructure" from "Function" line 18": + +test_islabel.c - IsBlockEnd In "ControlStructure" from "Function" line 19": + +test_islabel.c - IsLabel In "Function" from "GlobalScope" line 20": + +test_islabel.c - IsFunctionCall In "Function" from "GlobalScope" line 21": + +test_islabel.c - IsLabel In "Function" from "GlobalScope" line 22": + +test_islabel.c - IsFunctionCall In "Function" from "GlobalScope" line 23": + +test_islabel.c - IsLabel In "Function" from "GlobalScope" line 24": + 
+test_islabel.c - IsVarDeclaration In "Function" from "GlobalScope" line 25": + + + +test_islabel.c - IsLabel In "Function" from "GlobalScope" line 28": + + + + + + +test_islabel.c - IsBlockEnd In "Function" from "GlobalScope" line 33": + +test_islabel.c - IsEmptyLine In "GlobalScope" from "None" line 34": + +test_islabel.c - IsFuncDeclaration In "GlobalScope" from "None" line 35": + +test_islabel.c - IsBlockStart In "Function" from "GlobalScope" line 36": + +test_islabel.c - IsVarDeclaration In "Function" from "GlobalScope" line 37": + +test_islabel.c - IsEmptyLine In "Function" from "GlobalScope" line 38": + +test_islabel.c - IsAssignation In "Function" from "GlobalScope" line 39": + +test_islabel.c - IsLabel In "Function" from "GlobalScope" line 40": + +test_islabel.c - IsControlStatement In "Function" from "GlobalScope" line 41": + +test_islabel.c - IsExpressionStatement In "ControlStructure" from "Function" line 42": + +test_islabel.c - IsBlockStart In "Function" from "GlobalScope" line 43": + +test_islabel.c - IsBlockStart In "ControlStructure" from "Function" line 44": + +test_islabel.c - IsExpressionStatement In "ControlStructure" from "ControlStructure" line 45": + +test_islabel.c - IsLabel In "ControlStructure" from "ControlStructure" line 46": + +test_islabel.c - IsBlockEnd In "ControlStructure" from "ControlStructure" line 47": + +test_islabel.c - IsControlStatement In "ControlStructure" from "Function" line 48": + +test_islabel.c - IsBlockStart In "ControlStructure" from "ControlStructure" line 49": + +test_islabel.c - IsExpressionStatement In "ControlStructure" from "ControlStructure" line 50": + +test_islabel.c - IsBlockEnd In "ControlStructure" from "ControlStructure" line 51": + +test_islabel.c - IsControlStatement In "ControlStructure" from "Function" line 52": + +test_islabel.c - IsExpressionStatement In "ControlStructure" from "ControlStructure" line 53": + +test_islabel.c - IsBlockEnd In "ControlStructure" from "Function" line 54": + +test_islabel.c - IsBlockEnd In "Function" from "GlobalScope" line 55": + +test_islabel.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: GOTO_FBIDDEN (line: 11, col: 1): Goto statements are forbidden +Error: NO_SPC_AFR_PAR (line: 12, col: 15): Extra space after parenthesis (brace/bracket) +Error: TOO_FEW_TAB (line: 12, col: 17): Missing tabs for indent level +Error: GOTO_FBIDDEN (line: 13, col: 1): Goto statements are forbidden +Error: GOTO_FBIDDEN (line: 15, col: 1): Goto statements are forbidden +Error: LABEL_FBIDDEN (line: 16, col: 1): Label statements are forbidden +Error: LABEL_FBIDDEN (line: 20, col: 1): Label statements are forbidden +Error: TOO_MANY_TAB (line: 21, col: 1): Extra tabs for indent level +Error: LABEL_FBIDDEN (line: 22, col: 1): Label statements are forbidden +Error: TOO_FEW_TAB (line: 22, col: 1): Missing tabs for indent level +Error: LABEL_FBIDDEN (line: 24, col: 1): Label statements are forbidden +Error: TOO_FEW_TAB (line: 24, col: 1): Missing tabs for indent level +Error: LABEL_FBIDDEN (line: 25, col: 1): Label statements are forbidden +Error: TOO_FEW_TAB (line: 25, col: 1): Missing tabs for indent level +Error: VAR_DECL_START_FUNC (line: 25, col: 1): Variable declaration not at start of function +Error: SPACE_REPLACE_TAB (line: 25, col: 4): Found space when expecting tab +Error: TOO_FEW_TAB (line: 26, col: 1): Missing tabs for indent level +Error: MIXED_SPACE_TAB (line: 27, col: 1): Mixed spaces and tabs +Error: TOO_FEW_TAB (line: 27, col: 1): Missing tabs for indent level +Error: NO_SPC_BFR_OPR (line: 27, col: 6): extra space before operator +Error: SPC_BEFORE_NL (line: 27, col: 7): Space before newline +Error: LABEL_FBIDDEN (line: 28, col: 1): Label statements are forbidden +Error: TOO_FEW_TAB (line: 28, col: 1): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 33, col: 2): Too many instructions on a single line +Error: LABEL_FBIDDEN (line: 40, col: 1): Label statements are forbidden +Error: TOO_FEW_TAB (line: 40, col: 1): Missing tabs for indent level +Error: GOTO_FBIDDEN (line: 42, col: 1): Goto statements are forbidden +Error: GOTO_FBIDDEN (line: 45, col: 1): Goto statements are forbidden +Error: TOO_FEW_TAB (line: 45, col: 1): Missing tabs for indent level +Error: LABEL_FBIDDEN (line: 46, col: 1): Label statements are forbidden +Error: TOO_FEW_TAB (line: 46, col: 1): Missing tabs for indent level +Error: TOO_FEW_TAB (line: 47, col: 1): Missing tabs for indent level +Error: TOO_FEW_TAB (line: 48, col: 1): Missing tabs for indent level +Error: TOO_FEW_TAB (line: 49, col: 1): Missing tabs for indent level +Error: GOTO_FBIDDEN (line: 50, col: 1): Goto statements are forbidden +Error: TOO_FEW_TAB (line: 50, col: 1): Missing tabs for indent level +Error: TOO_FEW_TAB (line: 51, col: 1): Missing tabs for indent level +Error: TOO_FEW_TAB (line: 52, col: 1): Missing tabs for indent level +Error: SPACE_REPLACE_TAB (line: 52, col: 9): Found space when expecting tab +Error: GOTO_FBIDDEN (line: 53, col: 1): Goto statements are forbidden +Error: TOO_FEW_TAB (line: 53, col: 1): Missing tabs for indent level +Error: SPACE_REPLACE_TAB (line: 53, col: 17): Found space when expecting tab diff --git a/tests/rules/samples/test_loop_inf.c b/tests/rules/samples/test_loop_inf.c new file mode 100644 index 00000000..b8e219e9 --- /dev/null +++ b/tests/rules/samples/test_loop_inf.c @@ -0,0 +1,5 @@ +void test(void) +{ + v = v[0] * v; + r[0][0] = m1[0][0] * m2[0][0] + m1[0][1] * m2[1][0] + m1[0][2] * m2[2][0]; +} diff --git a/tests/rules/samples/test_loop_inf.out b/tests/rules/samples/test_loop_inf.out new file mode 100644 index 
00000000..3e2f1190 --- /dev/null +++ b/tests/rules/samples/test_loop_inf.out @@ -0,0 +1,12 @@ +test_loop_inf.c - IsFuncDeclaration In "GlobalScope" from "None" line 1": + +test_loop_inf.c - IsBlockStart In "Function" from "GlobalScope" line 2": + +test_loop_inf.c - IsAssignation In "Function" from "GlobalScope" line 3": + +test_loop_inf.c - IsAssignation In "Function" from "GlobalScope" line 4": + +test_loop_inf.c - IsBlockEnd In "Function" from "GlobalScope" line 5": + +test_loop_inf.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/tests/rules/samples/test_missing_semi_after_call.c b/tests/rules/samples/test_missing_semi_after_call.c new file mode 100644 index 00000000..14a72cd1 --- /dev/null +++ b/tests/rules/samples/test_missing_semi_after_call.c @@ -0,0 +1,4 @@ +void test(void) +{ + foo(42) +} diff --git a/tests/rules/samples/test_missing_semi_after_call.out b/tests/rules/samples/test_missing_semi_after_call.out new file mode 100644 index 00000000..fc116036 --- /dev/null +++ b/tests/rules/samples/test_missing_semi_after_call.out @@ -0,0 +1,10 @@ +test_missing_semi_after_call.c - IsFuncDeclaration In "GlobalScope" from "None" line 1": + +test_missing_semi_after_call.c - IsBlockStart In "Function" from "GlobalScope" line 2": + +test_missing_semi_after_call.c - IsDeclaration In "Function" from "GlobalScope" line 3": + + +test_missing_semi_after_call.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: TOO_FEW_TAB (line: 4, col: 1): Missing tabs for indent level diff --git a/tests/rules/samples/test_multiple_lines_escaping.c b/tests/rules/samples/test_multiple_lines_escaping.c new file mode 100644 index 00000000..e4ad8884 --- /dev/null +++ b/tests/rules/samples/test_multiple_lines_escaping.c @@ -0,0 +1,13 @@ +//\ +\ ola \ +\ ola \ +\ ola \ + \ + \ + \ +\ +\ +\ +\ +oxi \ +eita diff --git a/tests/rules/samples/test_multiple_lines_escaping.out b/tests/rules/samples/test_multiple_lines_escaping.out new file mode 100644 index 00000000..f833cb1f --- /dev/null +++ b/tests/rules/samples/test_multiple_lines_escaping.out @@ -0,0 +1,4 @@ +test_multiple_lines_escaping.c - IsComment In "GlobalScope" from "None" line 1": + +test_multiple_lines_escaping.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/tests/rules/samples/test_nl_after_var_decl.c b/tests/rules/samples/test_nl_after_var_decl.c new file mode 100644 index 00000000..e1897ff9 --- /dev/null +++ b/tests/rules/samples/test_nl_after_var_decl.c @@ -0,0 +1,58 @@ +void test(void) +{ + int x; + { + printf("Ok?"); + } +} + +void test2(void) +{ + int x; + {{ printf(1); }} +} + +void test3(void) +{ + int y; + (printf(1)); +} + +void test4(void) +{ + int y; + if (1) + { + printf(1); + } +} + +void test5(void) +{ + int x, y, z; + { + printf("Not ok"); + } +} + +void test6(void) +{ + int x; + { + while (1) +return ; + } +} + +void test7(void) +{ + int x; + {{return ;}} +} + +void test8(void) +{ + int x; + {if (1) + return ;} +} diff --git a/tests/rules/samples/test_nl_after_var_decl.out b/tests/rules/samples/test_nl_after_var_decl.out new file mode 100644 index 00000000..a3a030e2 --- /dev/null +++ b/tests/rules/samples/test_nl_after_var_decl.out @@ -0,0 +1,181 @@ +test_nl_after_var_decl.c - IsFuncDeclaration In "GlobalScope" from "None" line 1": + +test_nl_after_var_decl.c - IsBlockStart In "Function" from "GlobalScope" line 2": + +test_nl_after_var_decl.c - IsVarDeclaration In "Function" from "GlobalScope" line 3": + +test_nl_after_var_decl.c - IsBlockStart In "Function" from "GlobalScope" line 4": + +test_nl_after_var_decl.c - IsFunctionCall In "ControlStructure" from "Function" line 5": + +test_nl_after_var_decl.c - IsBlockEnd In "ControlStructure" from "Function" line 6": + +test_nl_after_var_decl.c - IsBlockEnd In "Function" from "GlobalScope" line 7": + +test_nl_after_var_decl.c - IsEmptyLine In "GlobalScope" from "None" line 8": + +test_nl_after_var_decl.c - IsFuncDeclaration In "GlobalScope" from "None" line 9": + +test_nl_after_var_decl.c - IsBlockStart In "Function" from "GlobalScope" line 10": + +test_nl_after_var_decl.c - IsVarDeclaration In "Function" from "GlobalScope" line 11": + +test_nl_after_var_decl.c - IsBlockStart In "Function" from "GlobalScope" line 12": + +test_nl_after_var_decl.c - IsBlockStart In "ControlStructure" from "Function" line 12": + +test_nl_after_var_decl.c - IsFunctionCall In "ControlStructure" from "ControlStructure" line 12": + +test_nl_after_var_decl.c - IsBlockEnd In "ControlStructure" from "ControlStructure" line 12": + +test_nl_after_var_decl.c - IsBlockEnd In "ControlStructure" from "Function" line 12": + +test_nl_after_var_decl.c - IsBlockEnd In "Function" from "GlobalScope" line 13": + +test_nl_after_var_decl.c - IsEmptyLine In "GlobalScope" from "None" line 14": + +test_nl_after_var_decl.c - IsFuncDeclaration In "GlobalScope" from "None" line 15": + +test_nl_after_var_decl.c - IsBlockStart In "Function" from "GlobalScope" line 16": + +test_nl_after_var_decl.c - IsVarDeclaration In "Function" from "GlobalScope" line 17": + +test_nl_after_var_decl.c - IsDeclaration In "Function" from "GlobalScope" line 18": + +test_nl_after_var_decl.c - IsBlockEnd In "Function" from "GlobalScope" line 19": + +test_nl_after_var_decl.c - IsEmptyLine In "GlobalScope" from "None" line 20": + +test_nl_after_var_decl.c - IsFuncDeclaration In "GlobalScope" from "None" line 21": + +test_nl_after_var_decl.c - IsBlockStart In "Function" from "GlobalScope" line 22": + +test_nl_after_var_decl.c - IsVarDeclaration In "Function" from "GlobalScope" line 23": + +test_nl_after_var_decl.c - IsControlStatement In "Function" from "GlobalScope" line 24": + +test_nl_after_var_decl.c - IsBlockStart In "ControlStructure" from "Function" line 25": + 
+test_nl_after_var_decl.c - IsFunctionCall In "ControlStructure" from "Function" line 26": + +test_nl_after_var_decl.c - IsBlockEnd In "ControlStructure" from "Function" line 27": + +test_nl_after_var_decl.c - IsBlockEnd In "Function" from "GlobalScope" line 28": + +test_nl_after_var_decl.c - IsEmptyLine In "GlobalScope" from "None" line 29": + +test_nl_after_var_decl.c - IsFuncDeclaration In "GlobalScope" from "None" line 30": + +test_nl_after_var_decl.c - IsBlockStart In "Function" from "GlobalScope" line 31": + +test_nl_after_var_decl.c - IsVarDeclaration In "Function" from "GlobalScope" line 32": + +test_nl_after_var_decl.c - IsBlockStart In "Function" from "GlobalScope" line 33": + +test_nl_after_var_decl.c - IsFunctionCall In "ControlStructure" from "Function" line 34": + +test_nl_after_var_decl.c - IsBlockEnd In "ControlStructure" from "Function" line 35": + +test_nl_after_var_decl.c - IsBlockEnd In "Function" from "GlobalScope" line 36": + +test_nl_after_var_decl.c - IsEmptyLine In "GlobalScope" from "None" line 37": + +test_nl_after_var_decl.c - IsFuncDeclaration In "GlobalScope" from "None" line 38": + +test_nl_after_var_decl.c - IsBlockStart In "Function" from "GlobalScope" line 39": + +test_nl_after_var_decl.c - IsVarDeclaration In "Function" from "GlobalScope" line 40": + +test_nl_after_var_decl.c - IsBlockStart In "Function" from "GlobalScope" line 41": + +test_nl_after_var_decl.c - IsControlStatement In "ControlStructure" from "Function" line 42": + +test_nl_after_var_decl.c - IsExpressionStatement In "ControlStructure" from "ControlStructure" line 43": + +test_nl_after_var_decl.c - IsBlockEnd In "ControlStructure" from "Function" line 44": + +test_nl_after_var_decl.c - IsBlockEnd In "Function" from "GlobalScope" line 45": + +test_nl_after_var_decl.c - IsEmptyLine In "GlobalScope" from "None" line 46": + +test_nl_after_var_decl.c - IsFuncDeclaration In "GlobalScope" from "None" line 47": + +test_nl_after_var_decl.c - IsBlockStart In "Function" from "GlobalScope" line 48": + +test_nl_after_var_decl.c - IsVarDeclaration In "Function" from "GlobalScope" line 49": + +test_nl_after_var_decl.c - IsBlockStart In "Function" from "GlobalScope" line 50": + +test_nl_after_var_decl.c - IsBlockStart In "ControlStructure" from "Function" line 50": + +test_nl_after_var_decl.c - IsExpressionStatement In "ControlStructure" from "ControlStructure" line 50": + +test_nl_after_var_decl.c - IsBlockEnd In "ControlStructure" from "ControlStructure" line 50": + +test_nl_after_var_decl.c - IsBlockEnd In "ControlStructure" from "Function" line 50": + +test_nl_after_var_decl.c - IsBlockEnd In "Function" from "GlobalScope" line 51": + +test_nl_after_var_decl.c - IsEmptyLine In "GlobalScope" from "None" line 52": + +test_nl_after_var_decl.c - IsFuncDeclaration In "GlobalScope" from "None" line 53": + +test_nl_after_var_decl.c - IsBlockStart In "Function" from "GlobalScope" line 54": + +test_nl_after_var_decl.c - IsVarDeclaration In "Function" from "GlobalScope" line 55": + +test_nl_after_var_decl.c - IsBlockStart In "Function" from "GlobalScope" line 56": + +test_nl_after_var_decl.c - IsControlStatement In "ControlStructure" from "Function" line 56": + +test_nl_after_var_decl.c - IsExpressionStatement In "ControlStructure" from "ControlStructure" line 57": + +test_nl_after_var_decl.c - IsBlockEnd In "ControlStructure" from "Function" line 57": + +test_nl_after_var_decl.c - IsBlockEnd In "Function" from "GlobalScope" line 58": + +test_nl_after_var_decl.c: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: NL_AFTER_VAR_DECL (line: 4, col: 1): Variable declarations must be followed by a newline +Error: NL_AFTER_VAR_DECL (line: 12, col: 1): Variable declarations must be followed by a newline +Error: BRACE_SHOULD_EOL (line: 12, col: 5): Expected newline after brace +Error: TOO_FEW_TAB (line: 12, col: 6): Missing tabs for indent level +Error: BRACE_SHOULD_EOL (line: 12, col: 7): Expected newline after brace +Error: TOO_FEW_TAB (line: 12, col: 8): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 12, col: 8): Too many instructions on a single line +Error: BRACE_SHOULD_EOL (line: 12, col: 19): Expected newline after brace +Error: TOO_FEW_TAB (line: 12, col: 19): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 12, col: 19): Too many instructions on a single line +Error: TOO_FEW_TAB (line: 12, col: 20): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 12, col: 20): Too many instructions on a single line +Error: NL_AFTER_VAR_DECL (line: 18, col: 1): Variable declarations must be followed by a newline +Error: NL_AFTER_VAR_DECL (line: 24, col: 1): Variable declarations must be followed by a newline +Error: MULT_DECL_LINE (line: 32, col: 10): Multiple declarations on a single line +Error: MULT_DECL_LINE (line: 32, col: 13): Multiple declarations on a single line +Error: NL_AFTER_VAR_DECL (line: 33, col: 1): Variable declarations must be followed by a newline +Error: TOO_MANY_FUNCS (line: 38, col: 1): Too many functions in file +Error: NL_AFTER_VAR_DECL (line: 41, col: 1): Variable declarations must be followed by a newline +Error: TOO_FEW_TAB (line: 43, col: 1): Missing tabs for indent level +Error: TOO_MANY_FUNCS (line: 47, col: 1): Too many functions in file +Error: NL_AFTER_VAR_DECL (line: 50, col: 1): Variable declarations must be followed by a newline +Error: BRACE_SHOULD_EOL (line: 50, col: 5): Expected newline after brace +Error: BRACE_SHOULD_EOL (line: 50, col: 6): Expected newline after brace +Error: TOO_FEW_TAB (line: 50, col: 6): Missing tabs for indent level +Error: TOO_FEW_TAB (line: 50, col: 7): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 50, col: 7): Too many instructions on a single line +Error: NO_SPC_BFR_OPR (line: 50, col: 14): extra space before operator +Error: BRACE_SHOULD_EOL (line: 50, col: 15): Expected newline after brace +Error: TOO_FEW_TAB (line: 50, col: 15): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 50, col: 15): Too many instructions on a single line +Error: TOO_FEW_TAB (line: 50, col: 16): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 50, col: 16): Too many instructions on a single line +Error: TOO_MANY_FUNCS (line: 53, col: 1): Too many functions in file +Error: NL_AFTER_VAR_DECL (line: 56, col: 1): Variable declarations must be followed by a newline +Error: BRACE_SHOULD_EOL (line: 56, col: 5): Expected newline after brace +Error: TOO_FEW_TAB (line: 56, col: 6): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 56, col: 6): Too many instructions on a single line +Error: MIXED_SPACE_TAB (line: 57, col: 1): Mixed spaces and tabs +Error: TOO_FEW_TAB (line: 57, col: 1): Missing tabs for indent level +Error: NO_SPC_BFR_OPR (line: 57, col: 16): extra space before operator +Error: TOO_FEW_TAB (line: 57, col: 17): Missing tabs for indent level +Error: TOO_MANY_INSTR (line: 57, col: 17): Too many instructions on a single line diff --git a/norminette/tests/rules/testfile_210104.h 
b/tests/rules/samples/testfile_210104.h similarity index 100% rename from norminette/tests/rules/testfile_210104.h rename to tests/rules/samples/testfile_210104.h diff --git a/norminette/tests/rules/testfile_210104.out b/tests/rules/samples/testfile_210104.out similarity index 76% rename from norminette/tests/rules/testfile_210104.out rename to tests/rules/samples/testfile_210104.out index 1b1da026..d9e793d9 100644 --- a/norminette/tests/rules/testfile_210104.out +++ b/tests/rules/samples/testfile_210104.out @@ -1,7 +1,7 @@ testfile_210104.h - IsPreprocessorStatement In "GlobalScope" from "None" line 1": - + testfile_210104.h - IsPreprocessorStatement In "GlobalScope" from "None" line 2": - + testfile_210104.h - IsEmptyLine In "GlobalScope" from "None" line 3": testfile_210104.h - IsUserDefinedType In "GlobalScope" from "None" line 4": @@ -21,16 +21,17 @@ testfile_210104.h - IsEmptyLine In "GlobalScope" from "None" line 11": testfile_210104.h - IsPreprocessorStatement In "GlobalScope" from "None" line 12": - + testfile_210104.h - IsPreprocessorStatement In "GlobalScope" from "None" line 13": - + testfile_210104.h - IsEmptyLine In "GlobalScope" from "None" line 14": testfile_210104.h - IsPreprocessorStatement In "GlobalScope" from "None" line 15": - + testfile_210104.h - IsEmptyLine In "GlobalScope" from "None" line 16": testfile_210104.h - IsComment In "GlobalScope" from "None" line 17": -testfile_210104.h: KO! - TOO_MANY_VALS (line: 12, col: 24): Too many values on define +testfile_210104.h: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: PREPROC_CONSTANT (line: 12, col: 15): Preprocessor statement must only contain constant defines diff --git a/norminette/tests/rules/testfile_210104_2.c b/tests/rules/samples/testfile_210104_2.c similarity index 100% rename from norminette/tests/rules/testfile_210104_2.c rename to tests/rules/samples/testfile_210104_2.c diff --git a/norminette/tests/rules/testfile_210104_2.out b/tests/rules/samples/testfile_210104_2.out similarity index 92% rename from norminette/tests/rules/testfile_210104_2.out rename to tests/rules/samples/testfile_210104_2.out index 6ccf9d43..e78b06e9 100644 --- a/norminette/tests/rules/testfile_210104_2.out +++ b/tests/rules/samples/testfile_210104_2.out @@ -48,5 +48,7 @@ testfile_210104_2.c - IsBlockEnd In "Function" from "GlobalScope" line 25": -testfile_210104_2.c: KO! - WRONG_SCOPE_COMMENT (line: 5, col: 5): Comment is invalid in this scope +testfile_210104_2.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: WRONG_SCOPE_COMMENT (line: 5, col: 5): Comment is invalid in this scope +Error: FORBIDDEN_TYPEDEF (line: 18, col: 1): Typedef declaration are not allowed in .c files diff --git a/norminette/tests/rules/testfile_210104_3.c b/tests/rules/samples/testfile_210104_3.c similarity index 100% rename from norminette/tests/rules/testfile_210104_3.c rename to tests/rules/samples/testfile_210104_3.c diff --git a/norminette/tests/rules/testfile_210104_3.out b/tests/rules/samples/testfile_210104_3.out similarity index 94% rename from norminette/tests/rules/testfile_210104_3.out rename to tests/rules/samples/testfile_210104_3.out index a67ad2cb..43419bc3 100644 --- a/norminette/tests/rules/testfile_210104_3.out +++ b/tests/rules/samples/testfile_210104_3.out @@ -44,6 +44,7 @@ testfile_210104_3.c - IsBlockEnd In "Function" from "GlobalScope" line 23": -testfile_210104_3.c: KO! 
- SPC_AFTER_OPERATOR (line: 20, col: 20): missing space after operator - SPC_AFTER_OPERATOR (line: 21, col: 24): missing space after operator +testfile_210104_3.c: Error! +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: SPC_AFTER_OPERATOR (line: 20, col: 20): missing space after operator +Error: SPC_AFTER_OPERATOR (line: 21, col: 24): missing space after operator diff --git a/norminette/tests/rules/testfile_210104_4.c b/tests/rules/samples/testfile_210104_4.c similarity index 53% rename from norminette/tests/rules/testfile_210104_4.c rename to tests/rules/samples/testfile_210104_4.c index 57270d13..de442f68 100644 --- a/norminette/tests/rules/testfile_210104_4.c +++ b/tests/rules/samples/testfile_210104_4.c @@ -1 +1,11 @@ int ((*g_conv[13])(t_syntax syntax, t_buffer *buffer, va_list va)); + +void main(void) +{ + 2 +*y; +} + +void main(void) +{ + 2 + *y; +} diff --git a/tests/rules/samples/testfile_210104_4.out b/tests/rules/samples/testfile_210104_4.out new file mode 100644 index 00000000..4751d040 --- /dev/null +++ b/tests/rules/samples/testfile_210104_4.out @@ -0,0 +1,26 @@ +testfile_210104_4.c - IsVarDeclaration In "GlobalScope" from "None" line 1": + +testfile_210104_4.c - IsEmptyLine In "GlobalScope" from "None" line 2": + +testfile_210104_4.c - IsFuncDeclaration In "GlobalScope" from "None" line 3": + +testfile_210104_4.c - IsBlockStart In "Function" from "GlobalScope" line 4": + +testfile_210104_4.c - IsDeclaration In "Function" from "GlobalScope" line 5": + +testfile_210104_4.c - IsBlockEnd In "Function" from "GlobalScope" line 6": + +testfile_210104_4.c - IsEmptyLine In "GlobalScope" from "None" line 7": + +testfile_210104_4.c - IsFuncDeclaration In "GlobalScope" from "None" line 8": + +testfile_210104_4.c - IsBlockStart In "Function" from "GlobalScope" line 9": + +testfile_210104_4.c - IsDeclaration In "Function" from "GlobalScope" line 10": + +testfile_210104_4.c - IsBlockEnd In "Function" from "GlobalScope" line 11": + +testfile_210104_4.c: Error! +Notice: GLOBAL_VAR_DETECTED (line: 1, col: 1): Global variable present in file. Make sure it is a reasonable choice. +Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header +Error: SPC_AFTER_OPERATOR (line: 5, col: 7): missing space after operator diff --git a/norminette/tests/rules/testfile_210108.h b/tests/rules/samples/testfile_210108.h similarity index 100% rename from norminette/tests/rules/testfile_210108.h rename to tests/rules/samples/testfile_210108.h diff --git a/norminette/tests/rules/testfile_210108.out b/tests/rules/samples/testfile_210108.out similarity index 87% rename from norminette/tests/rules/testfile_210108.out rename to tests/rules/samples/testfile_210108.out index d5921306..695f8f92 100644 --- a/norminette/tests/rules/testfile_210108.out +++ b/tests/rules/samples/testfile_210108.out @@ -1,7 +1,7 @@ testfile_210108.h - IsPreprocessorStatement In "GlobalScope" from "None" line 1": - + testfile_210108.h - IsPreprocessorStatement In "GlobalScope" from "None" line 2": - + testfile_210108.h - IsEmptyLine In "GlobalScope" from "None" line 3": testfile_210108.h - IsUserDefinedType In "GlobalScope" from "None" line 4": @@ -27,5 +27,6 @@ testfile_210108.h - IsEmptyLine In "GlobalScope" from "None" line 14": testfile_210108.h - IsPreprocessorStatement In "GlobalScope" from "None" line 15": - -testfile_210108.h: OK! + +testfile_210108.h: Error! 
+Error: INVALID_HEADER (line: 1, col: 1): Missing or invalid 42 header diff --git a/tests/test_errors.py b/tests/test_errors.py new file mode 100644 index 00000000..6ecce44c --- /dev/null +++ b/tests/test_errors.py @@ -0,0 +1,153 @@ +import json +from typing import List +from dataclasses import astuple + +import pytest +from unittest.mock import patch + +from norminette.file import File +from norminette.lexer import Lexer +from norminette.context import Context +from norminette.registry import Registry +from norminette.errors import JSONErrorsFormatter +from norminette.errors import Error, Errors, Highlight as H +from norminette.errors import HumanizedErrorsFormatter + + +@pytest.mark.parametrize("files, expected_result, ", [it.values() for it in [ + { + "files": [ + File("/nium/a.c", "#include "), + File("/nium/b.c", "int\tmain(void)\n{\n\treturn (1);\n}\n"), + File("/nium/c.c", "int\tfn(int n);\n"), + ], + "expected_result": "a.c: OK!\nb.c: OK!\nc.c: OK!\n", + }, + { + "files": [ + File("skyfall.c", "// Hello"), + ], + "expected_result": "skyfall.c: OK!\n", + }, + { + "files": [ + File("/nium/mortari.c", "#define TRUE 1"), + File("/nium/gensler.c", "int\tmain();\n"), + ], + "expected_result": ( + "mortari.c: OK!\n" + "gensler.c: Error!\n" + "Error: NO_ARGS_VOID (line: 1, col: 10):\tEmpty function argument requires void\n" + ) + }, + { + "files": [ + File("/nium/john.c", "#define x"), + File("/nium/galt.c", "#define x"), + ], + "expected_result": ( + "john.c: Error!\n" + "Error: MACRO_NAME_CAPITAL (line: 1, col: 9):\tMacro name must be capitalized\n" + "galt.c: Error!\n" + "Error: MACRO_NAME_CAPITAL (line: 1, col: 9):\tMacro name must be capitalized\n" + ) + }, + ] +]) +def test_humanized_formatter_errored_file(files: List[File], expected_result: str): + registry = Registry() + + with patch("norminette.rules.check_header.CheckHeader.run") as _: + for file in files: + lexer = Lexer(file) + context = Context(file, list(lexer)) + registry.run(context) + + formatter = HumanizedErrorsFormatter(files) + assert str(formatter) == expected_result + + +tests = [ + { + "file": File("/nium/test.c", "int\tmain()\n{\n\treturn ;\n}\n"), + "test": { + "files": [ + { + "path": "/nium/test.c", + "status": "Error", + "errors": [ + { + "name": "INVALID_HEADER", + "text": "Missing or invalid 42 header", + "level": "Error", + "highlights": [{"lineno": 1, "column": 1, "length": None, "hint": None}], + }, + { + "name": "NO_ARGS_VOID", + "text": "Empty function argument requires void", + "level": "Error", + "highlights": [{"lineno": 1, "column": 10, "length": None, "hint": None}], + }, + ], + }, + ], + }, + }, +] + + +@pytest.mark.parametrize("file,test", [it.values() for it in tests]) +def test_json_formatter_errored_file(file, test): + lexer = Lexer(file) + context = Context(file, list(lexer)) + Registry().run(context) + + formatter = JSONErrorsFormatter(file) + assert str(formatter) == json.dumps(test, separators=(',', ':')) + '\n' + + +def test_error_from_name(): + Error.from_name("NO_ARGS_VOID") + with pytest.raises(KeyError): + Error.from_name("KeyThatDoesNoExists") + + +@pytest.mark.parametrize("errors", [ + [ + Error("BAD_NAME", "Names can't be started with an '_'", "Error", [H(3, 5, 5)]), + Error("GLOBAL_VAR", "Global variables detected, take care", "Notice", [H(2, 1, 1)]), + Error("test", "ola", "Error", [H(1, 1, 1)]), + ], +]) +def test_add_error_signature(errors: List[Error]): + sequence = Errors() + + for error in errors: + sequence.add(error) + + assert len(sequence) == len(errors) + assert 
list(errors) == errors + + +@pytest.mark.parametrize("args, kwargs", [ + [["NO_ARGS_VOID",], {"highlights": [H(1, 1, 2)]}], +]) +def test_add_name_signature(args, kwargs): + assert isinstance(args, list) and len(args) == 1 + assert set() == set(kwargs) - {"level", "highlights"} + + errors = Errors() + errors.add(*args, **kwargs) + + +def test_error_add_highlight(): + highlights = [ + H(lineno=1, column=1, length=1), + H(lineno=1, column=2, length=1), + ] + + error = Error("42", "42") + error.add_highlight(highlights[0]) + error.add_highlight(*astuple(highlights[1])) + + assert error.highlights == highlights diff --git a/tests/test_lexer.py b/tests/test_lexer.py new file mode 100644 index 00000000..ad647b00 --- /dev/null +++ b/tests/test_lexer.py @@ -0,0 +1,587 @@ +from itertools import chain +from typing import Dict, Any, List, Optional, Tuple + +import pytest + +from norminette.lexer import Token as T +from norminette.lexer.dictionary import keywords, operators, brackets +from norminette.errors import Error as E, Highlight as H +from norminette.exceptions import UnexpectedEOF +from tests.utils import ( + dict_to_pytest_param, + lexer_from_source, +) + + +@pytest.mark.parametrize("source, parameters, expected", dict_to_pytest_param({ + "No args": ["oi", {}, 'o'], + "Empty source": ['', {}, None], + "Collect over than source length": ["hello", {"collect": 10}, "hello"], + "Collect with empty source": ['', {"collect": 3}, None], + "Offset in empty source": ['', {"offset": 3}, None], + "Offset": ["Hello", {"offset": 2}, 'l'], + "Offset with collect": ["Hello, world!", {"offset": 7, "collect": 5}, "world"], + "Offset over than source length with collect": ["Hello, world!", {"offset": 14, "collect": 3}, None], + "Newline": ["\naa", {}, '\n'], + "Escaped newline": ["\\\n", {}, '\\'], +})) +def test_lexer_raw_peek(source: str, parameters: Dict[str, Any], expected: Optional[str]): + lexer = lexer_from_source(source) + + assert lexer.raw_peek(**parameters) == expected + + +@pytest.mark.parametrize("source, parameters, expected", dict_to_pytest_param({ + "Single source char": ['{', {}, ('{', 1)], + "Single digraph source": ["<%", {}, ('{', 2)], + "Single trigraph source": ["??<", {}, ('{', 3)], + "Newline": ['\n', {}, ('\n', 1)], + "Escaped newline": ["\\\n", {}, ('\\', 1)], + "Times with exact chars": ["abc", {"times": 3}, ("abc", 3)], + "Times with trigraphs": [r"??", []], + "ASCII number": ["'9'", "", []], + "Single quote escaped": [r"'\''", r"", []], + "Newline": [r"'\n'", r"", []], + "Empty char": ["''", "", [ + E.from_name("EMPTY_CHAR", highlights=[H(lineno=1, column=1, length=2)])], + ], + "String quote": ['"a"', "None", []], + "Int literal": ['1', "None", []], + "Null": [r"'\0'", r"", []], + "Hexadecimal char E9 (é)": [r"'\xE9'", R"", []], + "Hexadecimal char without sequence": [r"'\x'", R"", [ + E.from_name("NO_HEX_DIGITS", level="Notice", highlights=[ + H(lineno=1, column=3, length=1), + ]), + ]], + "Escape sequence that doesn't exists": [r"'\j'", r"", [ + E.from_name("UNKNOWN_ESCAPE", level="Notice", highlights=[ + H(lineno=1, column=3, length=1), + ]), + ]], + "Char too long": ["'John Galt'", "", [ + E.from_name("CHAR_AS_STRING", highlights=[ + H(lineno=1, column=1, length=len("'John Galt'")), + H(lineno=1, column=1, length=1, + hint="Perhaps you want a string (double quote, \") instead of a char (single quote, ')?"), + ]) + ]], + "Char with L prefix": ["L'a'", "", []], + "Char escaped with L prefix": [r"L'\n'", r"", []], + "Hex with one digit": [r"'\xA'", r"", []], + "Hex with two 
digits": [r"'\x3F'", r"", []], + "U prefixed char": ["U'h'", "", []], + "u8 prefixed char": ["u8'h'", "", []], + "Bad prefixed char": ["s'h'", "None", []], +})) +def test_lexer_parse_char_literal(source: str, str_expected: str, errors: List[E]): + lexer = lexer_from_source(source) + token = lexer.parse_char_literal() + + assert str(token) == str_expected + assert repr(lexer.file.errors) == repr(errors) + + +@pytest.mark.parametrize("source, str_expected, errors", dict_to_pytest_param({ + "Empty string": ["\"\"", "", []], + "ASCII normal string": ["\"x+1=2, where x=1\"", "", []], + "Single quote string": ["'teste'", "None", []], + "Unexpected EOF with empty string": ['\"', "", [ + E.from_name("UNEXPECTED_EOF_STR", highlights=[ + H(lineno=1, column=1, length=1), + H(lineno=1, column=2, length=1, hint="Perhaps you forgot a double quote (\")?"), + ]), + ]], + "Unexpected EOF": ['\"asd', "", [ + E.from_name("UNEXPECTED_EOF_STR", highlights=[ + H(lineno=1, column=1, length=4), + H(lineno=1, column=5, length=1, hint="Perhaps you forgot a double quote (\")?"), + ]), + ]], + "String with escaped new line": ["\"first\\\n second\"", "", []], + "Basic string": ["\"Basic string\"", "", []], + "L basic string": ["L\"Basic string\"", "", []], + "U prefixed string": ["U\"hIGH\"", "", []], + "u8 prefixed string": ["u8\"hIGH\"", "", []], + "Bad prefixed string": ["s\"hIGH\"", "None", []], + "String with escaped quotes": ["\"Basic \\\"string\\\"\"", "", []], + "Multiples escapes and escaped quote": [r'"Escaped \\\"string\\\\\"\\"', + r'', + []], +})) +def test_lexer_parse_string_literal(source: str, str_expected: str, errors: List[E]): + lexer = lexer_from_source(source) + token = lexer.parse_string_literal() + + assert str(token) == str_expected + assert repr(lexer.file.errors) == repr(errors) + + +@pytest.mark.parametrize("source, str_expected", dict_to_pytest_param({ + "Empty comment": ["//", ""], + "Comment at EOF": ["// The sky is falling", ""], + "Comment at EOL": ["// The sky is falling\n", ""], + "Comment with escaped line in EOF": ["// The sky is falling\\", r""], + "Comment with escaped line in EOF using trigraph": [r"// The sky is falling??/", + r""], + "Comment with escaped line": ["// The sky is falling\\\n!", ""], + "Comment with escaped line using trigraph": ["// The sky is falling??/\n!", ""], +})) +def test_lexer_parse_line_comment(source: str, str_expected: str): + lexer = lexer_from_source(source) + token = lexer.parse_line_comment() + + assert str(token) == str_expected + assert lexer.file.errors.status == "OK" + + +@pytest.mark.parametrize("source, str_expected, errors", dict_to_pytest_param({ + "Multi-line comment in single line at EOF": ["/* The sky is falling*/", + "", []], + "Multi-line comment in multiples lines at EOF": ["/*\na\nb\n\n\n*/", "", []], + "Multi-line comment with escaped line": ["/*\\\na*/", "", []], + "Multi-line comment with escaped line using trigraph": ["/*??/\na*/", "", []], + "Multi-line comment not terminated with escaped line before EOF": ["/*\\\n", "", [ + E.from_name("UNEXPECTED_EOF_MC", highlights=[ + H(lineno=1, column=1, length=len("/*")), + ]), + ]], + "Multi-line comment not terminated": ["/* uepaaa\ne agora??", "", [ + E.from_name("UNEXPECTED_EOF_MC", highlights=[ + H(lineno=1, column=1, length=len("/* uepaaa\ne agora??")), + ]), + ]], + "Multi-line comment not terminate ending with a backslash": ["/*\\", r"", [ + E.from_name("UNEXPECTED_EOF_MC", highlights=[ + H(lineno=1, column=1, length=len("/*\\")), + ]), + ]], + "Comment (not multi-line)": ["// 
hey, i'm not a multi-line comment", "None", []], + "Space before a multi-line comment": [" /* */", "None", []], +})) +def test_lexer_parse_multi_line_comment(source: str, str_expected: str, errors: List[E]): + lexer = lexer_from_source(source) + token = lexer.parse_multi_line_comment() + + assert str(token) == str_expected + assert repr(lexer.file.errors) == repr(errors) + + +@pytest.mark.parametrize("source, str_expected, errors", dict_to_pytest_param({ + "Decimal integer": ["1234567890", "", []], + "Decimal integer with UL as suffix": ["1234567890UL", "", []], + "Decimal integer with bad suffix": ["1234567890ABC", "", [ + E.from_name("INVALID_SUFFIX", highlights=[ + H(lineno=1, column=11, length=len("ABC")), + ]), + ]], + "Binary integer": ["0b1101011", "", []], + "Binary integer with U as suffix": ["0b000001U", "", []], + "Binary integer with bad digits": ["0b1210491011", "", [ + E.from_name("INVALID_BIN_INT", highlights=[ + H(lineno=1, column=4, length=1, hint=None), # 2 + H(lineno=1, column=7, length=1, hint=None), # 4 + H(lineno=1, column=8, length=1, hint=None), # 9 + ]), + ]], + "Binary with bad suffix": ["0b0101e", "", [ + E.from_name("INVALID_SUFFIX", highlights=[H(lineno=1, column=7, length=1)]), + ]], + "Octal integer": ["01234567123", "", []], + "Octal integer with U as suffix": ["0123u", "", []], + "Octal integer with bad digits": ["00072189", "", [ + E.from_name("INVALID_OCT_INT", highlights=[ + H(lineno=1, column=7, length=1, hint=None), # 8 + H(lineno=1, column=8, length=1, hint=None), # 9 + ]), + ]], + "Octal integer with bad suffix with dots": ["000123u.23", "", [ + E.from_name("INVALID_SUFFIX", highlights=[ + H(lineno=1, column=len("000123") + 1, length=len("u.23")), + ]), + ]], + "Hexadecimal with bad suffix": ["0x1uLl;", "", [ + E.from_name("INVALID_SUFFIX", highlights=[H(lineno=1, column=4, length=3)]), + ]], + "Integer with u suffix": ["123u", "", []], + "Integer with U suffix": ["123U", "", []], + "Integer with uz suffix": ["123uz", "", []], + "Integer with UZ suffix": ["123UZ", "", []], + "Integer with z suffix": ["123z", "", []], + "Integer with Z suffix": ["123Z", "", []], + "Integer with ul suffix": ["123ul", "", []], + "Integer with UL suffix": ["123UL", "", []], + "Integer with ull suffix": ["123ull", "", []], + "Integer with ULL suffix": ["123ULL", "", []], + "Integer with ll suffix": ["9000000000ll", "", []], + "Integer with LL suffix": ["9000000000LL", "", []], + "Integer with bad suffix": ["10Uu", "", [ + E.from_name("INVALID_SUFFIX", highlights=[H(lineno=1, column=3, length=len("10"))]), + ]], +})) +def test_lexer_parse_integer_literal(source: str, str_expected: str, errors: List[E]): + lexer = lexer_from_source(source) + token = lexer.parse_integer_literal() + + assert str(token) == str_expected + assert repr(lexer.file.errors) == repr(errors) + + +@pytest.mark.parametrize("source, str_expected, errors", dict_to_pytest_param({ + "Integer": ["1234567890", "None", []], + "Integer with exponent-part": ["1e2", "", []], + "Integer with exponent-part and f as suffix": ["1e2f", "", []], + "Integer with bad exponent-part": ["1eeee2xf", "", [ + E.from_name("BAD_EXPONENT", highlights=[H(lineno=1, column=2, length=7)]), + ]], + "Exponent with sign": ["1e+3", "", []], + "Bad float followed by an unary expression": ["45e++ai", "", [ + E.from_name("BAD_EXPONENT", highlights=[H(lineno=1, column=3, length=2)]), + ]], + "Identifier with numbers": ["e42", "None", []], + "Fractional exponent with bad suffix": [".0e4x;", "", [ + E.from_name("BAD_FLOAT_SUFFIX", 
highlights=[H(lineno=1, column=5, length=1)]), + ]], + "Integer with bad suffix": ["10uu", "None", []], + "Bad suffix with all parts": ["10.12fe10", "", [ + E.from_name("BAD_FLOAT_SUFFIX", highlights=[H(lineno=1, column=6, length=len("fe10"))]), + ]], + "Float without fractional part but with suffix": ["10.f", "", []], + "Float without fractional part but bad suffix": ["10.fU", "", [ + E.from_name("BAD_FLOAT_SUFFIX", highlights=[H(lineno=1, column=4, length=2)]), + ]], + "Real bad suffix": ["21.3E56E4654", "", [ + E.from_name("BAD_FLOAT_SUFFIX", highlights=[H(lineno=1, column=8, length=5)]), + ]], + "Exponent with D suffix": ["105e4d", "", []], + "Bad exponent followed by a suffix": ["105eu", "", [ + E.from_name("BAD_EXPONENT", highlights=[H(lineno=1, column=4, length=2)]), + ]], + "Multiple dots": ["1.1..2.3.4.5", "", [ + E.from_name("MULTIPLE_DOTS", highlights=[H(lineno=1, column=4, length=len("..2.3.4.5"))]), + ]], + "Hexadecimal multiple dots": ["0xF.22..2.3.4.5", "", [ + E.from_name("MULTIPLE_DOTS", highlights=[H(lineno=1, column=7, length=len("..2.3.4.5"))]), + ]], + "Hexadecimal with just constant": ["0xC0FFE", "None", []], + "Hexadecimal integer with suffix": ["0XA0000024u", "None", []], + "Hexadecimal integer with double suffix": ["0XA0000021uL", "None", []], + "Multiple X": ["0xxXxxX123.32f", "", [ + E.from_name("MULTIPLE_X", highlights=[H(lineno=1, column=2, length=len("xxXxxX"))]), + ]], + "Multiple X in an integer hexadecimal": ["0xX1", "None", []], + "Multiple X with exponent": ["0xxAp2", "", [ + E.from_name("MULTIPLE_X", highlights=[H(lineno=1, column=2, length=2)]), + ]], + **{ + # https://www.gnu.org/software/c-intro-and-ref/manual/html_node/Floating-Constants.html + f"Float GNU {number} {source!r}": [source, f"", []] + for number, source in enumerate(( + "1500.0", "15e2", "15e+2", "15.0e2", "1.5e+3", ".15e4", "15000e-1", + "1.0", "1000.", "3.14159", ".05", ".0005", "1e0", "1.0000e0", "100e1", + "100e+1", "100E+1", "1e3", "10000e-1", "3.14159e0", "5e-2", ".0005e+2", + "5E-2", ".0005E2", ".05e-2", "3.14159f", "3.14159e0f", "1000.f", "100E1F", + ".0005f", ".05e-2f", + "0xAp2", "0xAp-1", "0x2.0Bp4", "0xE.2p3", "0x123.ABCp0", + "0x123.ABCp4", "0x100p-8", "0x10p-4", "0x1p+4", "0x1p+8", + )) + } +})) +def test_lexer_parse_float_literal(source: str, str_expected: str, errors: List[E]): + lexer = lexer_from_source(source) + token = lexer.parse_float_literal() + + assert str(token) == str_expected + assert repr(lexer.file.errors) == repr(errors) + + +@pytest.mark.parametrize("source, str_expected", dict_to_pytest_param({ + "Identifier starting with an integer": ["42_hello", "None"], + "Identifier starting with an underscore": ["_hello", ""], + "ft_printf identifier": ["ft_printf", ""], + "Identifier with just underscore": ['_', ""], + "Identifier with just one letter": ['a', ""], + "Identifier with uppercase letters": ["EGGS", ""], + "Identifier with mixedcase letters": ["AbCd", ""], + "Identifier with lowercase letters": ["duck", ""], + "Identifier with an hyphen": ["clojure-is-cool", ""], + "Identifier with integers, letters and underscores": ["ascii_2_bigint128", ""], + "String starting with an letter": ["L\"ola\"", ""], + "Char starting with an letter": ["L'1'", ""], +})) +def test_lexer_parse_identifier(source: str, str_expected: str): + lexer = lexer_from_source(source) + token = lexer.parse_identifier() + + assert str(token) == str_expected + assert lexer.file.errors.status == "OK" + + +@pytest.mark.parametrize("keyword", keywords.keys()) +def 
test_lexer_parse_identifier_keyword_only(keyword: str): + lexer = lexer_from_source(keyword) + token = lexer.parse_identifier() + + assert str(token) == f"<{keyword.upper()}>" + assert lexer.file.errors.status == "OK" + + +@pytest.mark.parametrize("operator, token_type", list(operators.items()) + [ + ["??=", "HASH"], + ["%:", "HASH"], + ["??'", "BWISE_XOR"], + ["??'=", "XOR_ASSIGN"], + ["??!", "BWISE_OR"], + ["??!??!", "OR"], + ["??!=", "OR_ASSIGN"], + ["??-", "BWISE_NOT"], +]) +def test_lexer_parse_operator(operator: str, token_type: str): + lexer = lexer_from_source(operator) + token = lexer.parse_operator() + + assert str(token) == f"<{token_type}>" + assert lexer.file.errors.status == "OK" + + +@pytest.mark.parametrize("bracket, token_type", list(brackets.items()) + [ + ["<%", "LBRACE"], + ["??<", "LBRACE"], + ["%>", "RBRACE"], + ["??>", "RBRACE"], + ["<:", "LBRACKET"], + ["??(", "LBRACKET"], + [":>", "RBRACKET"], + ["??)", "RBRACKET"], +]) +def test_lexer_parse_brackets(bracket: str, token_type: str): + lexer = lexer_from_source(bracket) + token = lexer.parse_brackets() + + assert str(token) == f"<{token_type}>" + assert lexer.file.errors.status == "OK" + + +@pytest.mark.parametrize("source, expected_tokens", dict_to_pytest_param({ + "Empty source": ['', []], + "Just space source": [" ", [ + T("SPACE", (1, 1)), + T("SPACE", (1, 2)), + T("SPACE", (1, 3)), + ]], + "Identifier followed by a comment": ["test//comment", [ + T("IDENTIFIER", (1, 1), "test"), + T("COMMENT", (1, 5), "//comment"), + ]], + "Main function prototype with void": ["int\tmain(void);", [ + T("INT", (1, 1)), + T("TAB", (1, 4)), + T("IDENTIFIER", (1, 5), value="main"), + T("LPARENTHESIS", (1, 9)), + T("VOID", (1, 10)), + T("RPARENTHESIS", (1, 14)), + T("SEMI_COLON", (1, 15)), + ]], + # Checks if `identifier` is bellow to `char` and `string` + "Wide char/string followed by identifier": ["L'a' L\"bcd\" name", [ + T("CHAR_CONST", (1, 1), value="L'a'"), + T("SPACE", (1, 5)), + T("STRING", (1, 6), value="L\"bcd\""), + T("SPACE", (1, 12)), + T("IDENTIFIER", (1, 13), value="name"), + ]], + "Integer": ["42", [T("CONSTANT", (1, 1), value="42")]], + "Integer with plus sign": ["+42", [ + T("PLUS", (1, 1)), + T("CONSTANT", (1, 2), value="42"), + ]], + "Integer with minus sign": ["-42", [ + T("MINUS", (1, 1)), + T("CONSTANT", (1, 2), value="42"), + ]], + "Integer with double sign": ["+-42", [ + T("PLUS", (1, 1)), + T("MINUS", (1, 2)), + T("CONSTANT", (1, 3), value="42"), + ]], + "Float": ["4.2", [T("CONSTANT", (1, 1), value="4.2")]], + "Float without integer part": [".42", [T("CONSTANT", (1, 1), value=".42")]], + "Float exponential": ["4e2", [T("CONSTANT", (1, 1), value="4e2")]], + "Float with exponential in fractional part without integer": [".4e2", [T("CONSTANT", (1, 1), value=".4e2")]], + "Float exponential with suffix": ["4e2f", [T("CONSTANT", (1, 1), value="4e2f")]], + "Float exponential in fractional part with suffix": [".4e2f", [T("CONSTANT", (1, 1), value=".4e2f")]], + "Octal": ["042", [T("CONSTANT", (1, 1), value="042")]], + "Hexadecimal": ["0x42", [T("CONSTANT", (1, 1), value="0x42")]], + "Negative hexadecimal": ["-0x4e2", [ + T("MINUS", (1, 1)), + T("CONSTANT", (1, 2), value="0x4e2"), + ]], + "Integer with l as suffix": ["42l", [T("CONSTANT", (1, 1), value="42l")]], + "Integer with ul as suffix": ["42ul", [T("CONSTANT", (1, 1), value="42ul")]], + "Integer with ll as suffix": ["42ll", [T("CONSTANT", (1, 1), value="42ll")]], + "Integer with ull as suffix": ["42ull", [T("CONSTANT", (1, 1), value="42ull")]], + "Integer with u 
suffix": ["42u", [T("CONSTANT", (1, 1), value="42u")]], + "Multiples signs": ["-+-+-+-+-+-+-+-0Xe4Ae2", [ + T("MINUS", (1, 1)), + T("PLUS", (1, 2)), + T("MINUS", (1, 3)), + T("PLUS", (1, 4)), + T("MINUS", (1, 5)), + T("PLUS", (1, 6)), + T("MINUS", (1, 7)), + T("PLUS", (1, 8)), + T("MINUS", (1, 9)), + T("PLUS", (1, 10)), + T("MINUS", (1, 11)), + T("PLUS", (1, 12)), + T("MINUS", (1, 13)), + T("PLUS", (1, 14)), + T("MINUS", (1, 15)), + T("CONSTANT", (1, 16), value="0Xe4Ae2"), + ]], + "Member expression with left part": [".e42", [ + T("DOT", (1, 1)), + T("IDENTIFIER", (1, 2), value="e42") + ]], + "Multiples dots in float": ["4.4.4", [T("CONSTANT", (1, 1), value="4.4.4")]], + "Multiples exponents": ["4e4e4", [T("CONSTANT", (1, 1), value="4e4e4")]], + "Bad suffix 1": ["4x4x4", [T("CONSTANT", (1, 1), value="4x4x4")]], + "Bad suffix 2": ["42uul", [T("CONSTANT", (1, 1), value="42uul")]], + "Bad suffix 3": ["42Lllu", [T("CONSTANT", (1, 1), value="42Lllu")]], + "Bad suffix 4": ["42lul", [T("CONSTANT", (1, 1), value="42lul")]], + "Bad exponent": [".42e", [T("CONSTANT", (1, 1), ".42e")]], + "Escaped newline followed by an identifier": ["\\\nhello;", [ + T("IDENTIFIER", (2, 1), value="hello"), + T("SEMI_COLON", (2, 6)), + ]], + # TODO Add tests for digraphs/trigraphs + **dict(chain.from_iterable(map(dict.items, ( + { + f"Empty {name}": [f"#{name}", [ + T("HASH", (1, 1)), + T("IDENTIFIER", (1, 2), value=name), + ]], + f"Empty spaced {name}": [f"# {name} ", [ + T("HASH", (1, 1)), + T("SPACE", (1, 2)), + T("IDENTIFIER", (1, 3), value=name), + T("SPACE", (1, 3 + len(name))), + ]], + f"Empty {name} ending with withespaces": [f"#{name} ", [ + T("HASH", (1, 1)), + T("IDENTIFIER", (1, 2), value=name), + T("SPACE", (1, 2 + len(name))), + T("TAB", (1, 3 + len(name))), + ]], + f"Empty {name} ending with a comment separated by space": [f"#{name} //bla", [ + T("HASH", (1, 1)), + T("IDENTIFIER", (1 , 2), value=name), + T("SPACE", (1, 2 + len(name))), + T("COMMENT", (1, 3 + len(name)), value="//bla"), + ]], + f"Empty {name} followed by a comment": [f"#{name}//bla ", [ + T("HASH", (1, 1)), + T("IDENTIFIER", (1, 2), value=name), + T("COMMENT", (1, 2 + len(name)), value="//bla "), + ]], + } + for name in ("define", "error", "ifndef", "ifdef", "include", "pragma", "undef") + )))), +})) +def test_lexer_tokens(source: str, expected_tokens: List[T]): + lexer = lexer_from_source(source) + tokens = list(lexer) + + assert tokens == expected_tokens diff --git a/norminette/tests/lexer/files/chars_and_strings_constants.c b/tests/tokenizer/samples/ok/chars_and_strings_constants.c similarity index 100% rename from norminette/tests/lexer/files/chars_and_strings_constants.c rename to tests/tokenizer/samples/ok/chars_and_strings_constants.c diff --git a/norminette/tests/lexer/files/chars_and_strings_constants.tokens b/tests/tokenizer/samples/ok/chars_and_strings_constants.tokens similarity index 93% rename from norminette/tests/lexer/files/chars_and_strings_constants.tokens rename to tests/tokenizer/samples/ok/chars_and_strings_constants.tokens index 58806dae..6089f9f2 100644 --- a/norminette/tests/lexer/files/chars_and_strings_constants.tokens +++ b/tests/tokenizer/samples/ok/chars_and_strings_constants.tokens @@ -1,4 +1,4 @@ -> + diff --git a/norminette/tests/lexer/files/num_constants.c b/tests/tokenizer/samples/ok/num_constants.c similarity index 100% rename from norminette/tests/lexer/files/num_constants.c rename to tests/tokenizer/samples/ok/num_constants.c diff --git a/norminette/tests/lexer/files/num_constants.tokens 
b/tests/tokenizer/samples/ok/num_constants.tokens similarity index 100% rename from norminette/tests/lexer/files/num_constants.tokens rename to tests/tokenizer/samples/ok/num_constants.tokens diff --git a/norminette/tests/lexer/files/ok_test_01.c b/tests/tokenizer/samples/ok/ok_test_01.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_01.c rename to tests/tokenizer/samples/ok/ok_test_01.c diff --git a/norminette/tests/lexer/files/ok_test_01.tokens b/tests/tokenizer/samples/ok/ok_test_01.tokens similarity index 100% rename from norminette/tests/lexer/files/ok_test_01.tokens rename to tests/tokenizer/samples/ok/ok_test_01.tokens diff --git a/norminette/tests/lexer/files/ok_test_02.c b/tests/tokenizer/samples/ok/ok_test_02.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_02.c rename to tests/tokenizer/samples/ok/ok_test_02.c diff --git a/norminette/tests/lexer/files/ok_test_02.tokens b/tests/tokenizer/samples/ok/ok_test_02.tokens similarity index 100% rename from norminette/tests/lexer/files/ok_test_02.tokens rename to tests/tokenizer/samples/ok/ok_test_02.tokens diff --git a/norminette/tests/lexer/files/ok_test_03.c b/tests/tokenizer/samples/ok/ok_test_03.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_03.c rename to tests/tokenizer/samples/ok/ok_test_03.c diff --git a/norminette/tests/lexer/files/ok_test_03.tokens b/tests/tokenizer/samples/ok/ok_test_03.tokens similarity index 100% rename from norminette/tests/lexer/files/ok_test_03.tokens rename to tests/tokenizer/samples/ok/ok_test_03.tokens diff --git a/norminette/tests/lexer/files/ok_test_04.c b/tests/tokenizer/samples/ok/ok_test_04.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_04.c rename to tests/tokenizer/samples/ok/ok_test_04.c diff --git a/norminette/tests/lexer/files/ok_test_04.tokens b/tests/tokenizer/samples/ok/ok_test_04.tokens similarity index 100% rename from norminette/tests/lexer/files/ok_test_04.tokens rename to tests/tokenizer/samples/ok/ok_test_04.tokens diff --git a/norminette/tests/lexer/files/ok_test_05.c b/tests/tokenizer/samples/ok/ok_test_05.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_05.c rename to tests/tokenizer/samples/ok/ok_test_05.c diff --git a/tests/tokenizer/samples/ok/ok_test_05.tokens b/tests/tokenizer/samples/ok/ok_test_05.tokens new file mode 100644 index 00000000..7615b283 --- /dev/null +++ b/tests/tokenizer/samples/ok/ok_test_05.tokens @@ -0,0 +1,13 @@ + + + + + + + + + + diff --git a/norminette/tests/lexer/files/ok_test_06.c b/tests/tokenizer/samples/ok/ok_test_06.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_06.c rename to tests/tokenizer/samples/ok/ok_test_06.c diff --git a/norminette/tests/lexer/files/ok_test_06.tokens b/tests/tokenizer/samples/ok/ok_test_06.tokens similarity index 100% rename from norminette/tests/lexer/files/ok_test_06.tokens rename to tests/tokenizer/samples/ok/ok_test_06.tokens diff --git a/norminette/tests/lexer/files/ok_test_07.c b/tests/tokenizer/samples/ok/ok_test_07.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_07.c rename to tests/tokenizer/samples/ok/ok_test_07.c diff --git a/norminette/tests/lexer/files/ok_test_07.tokens b/tests/tokenizer/samples/ok/ok_test_07.tokens similarity index 100% rename from norminette/tests/lexer/files/ok_test_07.tokens rename to tests/tokenizer/samples/ok/ok_test_07.tokens diff --git a/norminette/tests/lexer/files/ok_test_08.c 
b/tests/tokenizer/samples/ok/ok_test_08.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_08.c rename to tests/tokenizer/samples/ok/ok_test_08.c diff --git a/norminette/tests/lexer/files/ok_test_08.tokens b/tests/tokenizer/samples/ok/ok_test_08.tokens similarity index 100% rename from norminette/tests/lexer/files/ok_test_08.tokens rename to tests/tokenizer/samples/ok/ok_test_08.tokens diff --git a/norminette/tests/lexer/files/ok_test_09.c b/tests/tokenizer/samples/ok/ok_test_09.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_09.c rename to tests/tokenizer/samples/ok/ok_test_09.c diff --git a/norminette/tests/lexer/files/ok_test_09.tokens b/tests/tokenizer/samples/ok/ok_test_09.tokens similarity index 100% rename from norminette/tests/lexer/files/ok_test_09.tokens rename to tests/tokenizer/samples/ok/ok_test_09.tokens diff --git a/norminette/tests/lexer/files/ok_test_10.c b/tests/tokenizer/samples/ok/ok_test_10.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_10.c rename to tests/tokenizer/samples/ok/ok_test_10.c diff --git a/norminette/tests/lexer/files/ok_test_10.tokens b/tests/tokenizer/samples/ok/ok_test_10.tokens similarity index 100% rename from norminette/tests/lexer/files/ok_test_10.tokens rename to tests/tokenizer/samples/ok/ok_test_10.tokens diff --git a/norminette/tests/lexer/files/ok_test_11.c b/tests/tokenizer/samples/ok/ok_test_11.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_11.c rename to tests/tokenizer/samples/ok/ok_test_11.c diff --git a/norminette/tests/lexer/files/ok_test_11.tokens b/tests/tokenizer/samples/ok/ok_test_11.tokens similarity index 100% rename from norminette/tests/lexer/files/ok_test_11.tokens rename to tests/tokenizer/samples/ok/ok_test_11.tokens diff --git a/norminette/tests/lexer/files/ok_test_12.c b/tests/tokenizer/samples/ok/ok_test_12.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_12.c rename to tests/tokenizer/samples/ok/ok_test_12.c diff --git a/norminette/tests/lexer/files/ok_test_12.tokens b/tests/tokenizer/samples/ok/ok_test_12.tokens similarity index 100% rename from norminette/tests/lexer/files/ok_test_12.tokens rename to tests/tokenizer/samples/ok/ok_test_12.tokens diff --git a/norminette/tests/lexer/files/ok_test_13.c b/tests/tokenizer/samples/ok/ok_test_13.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_13.c rename to tests/tokenizer/samples/ok/ok_test_13.c diff --git a/norminette/tests/lexer/files/ok_test_13.tokens b/tests/tokenizer/samples/ok/ok_test_13.tokens similarity index 100% rename from norminette/tests/lexer/files/ok_test_13.tokens rename to tests/tokenizer/samples/ok/ok_test_13.tokens diff --git a/norminette/tests/lexer/files/ok_test_14.c b/tests/tokenizer/samples/ok/ok_test_14.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_14.c rename to tests/tokenizer/samples/ok/ok_test_14.c diff --git a/norminette/tests/lexer/files/ok_test_14.tokens b/tests/tokenizer/samples/ok/ok_test_14.tokens similarity index 100% rename from norminette/tests/lexer/files/ok_test_14.tokens rename to tests/tokenizer/samples/ok/ok_test_14.tokens diff --git a/norminette/tests/lexer/files/ok_test_15.c b/tests/tokenizer/samples/ok/ok_test_15.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_15.c rename to tests/tokenizer/samples/ok/ok_test_15.c diff --git a/norminette/tests/lexer/files/ok_test_15.tokens b/tests/tokenizer/samples/ok/ok_test_15.tokens similarity index 
100% rename from norminette/tests/lexer/files/ok_test_15.tokens rename to tests/tokenizer/samples/ok/ok_test_15.tokens diff --git a/norminette/tests/lexer/files/ok_test_16.c b/tests/tokenizer/samples/ok/ok_test_16.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_16.c rename to tests/tokenizer/samples/ok/ok_test_16.c diff --git a/norminette/tests/lexer/files/ok_test_16.tokens b/tests/tokenizer/samples/ok/ok_test_16.tokens similarity index 100% rename from norminette/tests/lexer/files/ok_test_16.tokens rename to tests/tokenizer/samples/ok/ok_test_16.tokens diff --git a/norminette/tests/lexer/files/ok_test_17.c b/tests/tokenizer/samples/ok/ok_test_17.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_17.c rename to tests/tokenizer/samples/ok/ok_test_17.c diff --git a/norminette/tests/lexer/files/ok_test_17.tokens b/tests/tokenizer/samples/ok/ok_test_17.tokens similarity index 100% rename from norminette/tests/lexer/files/ok_test_17.tokens rename to tests/tokenizer/samples/ok/ok_test_17.tokens diff --git a/norminette/tests/lexer/files/ok_test_18.c b/tests/tokenizer/samples/ok/ok_test_18.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_18.c rename to tests/tokenizer/samples/ok/ok_test_18.c diff --git a/norminette/tests/lexer/files/ok_test_18.tokens b/tests/tokenizer/samples/ok/ok_test_18.tokens similarity index 100% rename from norminette/tests/lexer/files/ok_test_18.tokens rename to tests/tokenizer/samples/ok/ok_test_18.tokens diff --git a/norminette/tests/lexer/files/ok_test_19.c b/tests/tokenizer/samples/ok/ok_test_19.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_19.c rename to tests/tokenizer/samples/ok/ok_test_19.c diff --git a/norminette/tests/lexer/files/ok_test_19.tokens b/tests/tokenizer/samples/ok/ok_test_19.tokens similarity index 100% rename from norminette/tests/lexer/files/ok_test_19.tokens rename to tests/tokenizer/samples/ok/ok_test_19.tokens diff --git a/norminette/tests/lexer/files/ok_test_20.c b/tests/tokenizer/samples/ok/ok_test_20.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_20.c rename to tests/tokenizer/samples/ok/ok_test_20.c diff --git a/norminette/tests/lexer/files/ok_test_20.tokens b/tests/tokenizer/samples/ok/ok_test_20.tokens similarity index 100% rename from norminette/tests/lexer/files/ok_test_20.tokens rename to tests/tokenizer/samples/ok/ok_test_20.tokens diff --git a/norminette/tests/lexer/files/ok_test_21.c b/tests/tokenizer/samples/ok/ok_test_21.c similarity index 100% rename from norminette/tests/lexer/files/ok_test_21.c rename to tests/tokenizer/samples/ok/ok_test_21.c diff --git a/tests/tokenizer/samples/ok/ok_test_21.tokens b/tests/tokenizer/samples/ok/ok_test_21.tokens new file mode 100644 index 00000000..4dd1a53b --- /dev/null +++ b/tests/tokenizer/samples/ok/ok_test_21.tokens @@ -0,0 +1,4 @@ + + + + diff --git a/tests/tokenizer/samples/ok/ok_test_22.c b/tests/tokenizer/samples/ok/ok_test_22.c new file mode 100644 index 00000000..0864d7a7 --- /dev/null +++ b/tests/tokenizer/samples/ok/ok_test_22.c @@ -0,0 +1,7 @@ +/*\*/ + +int main() { + int a = 1; +} + +/**/ diff --git a/tests/tokenizer/samples/ok/ok_test_22.tokens b/tests/tokenizer/samples/ok/ok_test_22.tokens new file mode 100644 index 00000000..75ba4a89 --- /dev/null +++ b/tests/tokenizer/samples/ok/ok_test_22.tokens @@ -0,0 +1,7 @@ + + + + + + + diff --git a/norminette/tests/lexer/files/ok_test_8.tokens b/tests/tokenizer/samples/ok/ok_test_8.tokens similarity index 100% 
rename from norminette/tests/lexer/files/ok_test_8.tokens rename to tests/tokenizer/samples/ok/ok_test_8.tokens diff --git a/tests/tokenizer/token_generator_test.py b/tests/tokenizer/token_generator_test.py new file mode 100644 index 00000000..67d91236 --- /dev/null +++ b/tests/tokenizer/token_generator_test.py @@ -0,0 +1,28 @@ +import pytest +import glob + +from norminette.file import File +from norminette.lexer import Lexer +from norminette.registry import Registry + + +registry = Registry() +test_files = glob.glob("tests/tokenizer/samples/ok/*.[ch]") + + +@pytest.mark.parametrize("file", test_files) +def test_rule_for_file(file): + with open(f"{file.split('.')[0]}.tokens") as out_file: + out_content = out_file.read() + + lexer = Lexer(File(file)) + + output = '' + tokens = list(lexer) + if tokens: + for token in tokens: + output += str(token) + '\n' * int(token.type == "NEWLINE") + if tokens[-1].type != "NEWLINE": + output += "\n" + + assert output == out_content diff --git a/tests/utils.py b/tests/utils.py new file mode 100644 index 00000000..ecf7631b --- /dev/null +++ b/tests/utils.py @@ -0,0 +1,20 @@ +from typing import Dict, Any, List + +import pytest +from _pytest.mark.structures import ParameterSet + +from norminette.file import File +from norminette.lexer import Lexer + + +def lexer_from_source(source: str, /) -> Lexer: + file = File("", source) + return Lexer(file) + + +def dict_to_pytest_param(data: Dict[str, List[Any]]) -> List[ParameterSet]: + params: List[ParameterSet] = [] + for id, values in data.items(): + param = pytest.param(*values, id=id) + params.append(param) + return params
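
Usage note for reviewers: below is a minimal sketch of how the new helpers in tests/utils.py compose with pytest parametrization, in the same style as the lexer tests added in this diff. The test name, sample sources, and expected token counts are hypothetical illustrations only (the counts are inferred from the test_lexer_tokens cases above, e.g. "test//comment" yielding two tokens); only File, Lexer, lexer_from_source, and dict_to_pytest_param come from the changes in this patch.

import pytest

from tests.utils import dict_to_pytest_param, lexer_from_source


@pytest.mark.parametrize("source, expected_count", dict_to_pytest_param({
    # Each dictionary key becomes the pytest id; the list items are unpacked
    # into the test parameters, mirroring the parametrized tables above.
    # These two cases are hypothetical examples, not fixtures from this diff.
    "Single identifier": ["hello", 1],
    "Identifier and semicolon": ["hello;", 2],
}))
def test_token_count(source: str, expected_count: int):
    # lexer_from_source wraps the raw string in an in-memory File, so no
    # sample file on disk is needed for small, targeted lexer checks.
    lexer = lexer_from_source(source)
    assert len(list(lexer)) == expected_count

Keying the parameter table by a human-readable name is why the larger lexer tables in this diff read as "case name: [source, expected, errors]" entries: the name doubles as the pytest id, which makes failing cases easy to locate in the test output.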