diff --git a/post-training/.gitattributes b/post-training/.gitattributes new file mode 100644 index 0000000..fdeae08 --- /dev/null +++ b/post-training/.gitattributes @@ -0,0 +1,3 @@ +*.safetensors filter=lfs diff=lfs merge=lfs -text +rl.tar.gz* filter=lfs diff=lfs merge=lfs -text +vllm.tar.gz* filter=lfs diff=lfs merge=lfs -text diff --git a/post-training/.gitignore b/post-training/.gitignore new file mode 100644 index 0000000..27f2bb0 --- /dev/null +++ b/post-training/.gitignore @@ -0,0 +1,56 @@ +# OS / IDE +.DS_Store +.idea/ +.vscode/ +*.swp + +# Python +__pycache__/ +*.pyc +*.pyo +*.pyd +.ipynb_checkpoints/ +.venv/ +venv/ +.env +.env.* + +# Logs & outputs +logs/ +output/ +outputs/ +runs/ +wandb/ +*.log + +# Data & checkpoints (large files) +data/ +checkpoints/ +experiments/ +*.ckpt +*.safetensors +*.pt +*.bin + +# HuggingFace / caches +hf_cache/ +ms_cache/ +om_cache/ +**/.cache/ + +# Conda env archives +conda_envs/*.tar.gz +conda_envs/*.tar.gz.part.* + +# LLaMA-Factory artifacts +LLaMA-Factory/output/ +LLaMA-Factory/saves/ +LLaMA-Factory/.cache/ + +# VLM-R1 artifacts +VLM-R1/output/ +VLM-R1/.cache/ + +# vLLM +vllm/*.json +vllm/*.log diff --git a/post-training/LLaMA-Factory/.dockerignore b/post-training/LLaMA-Factory/.dockerignore new file mode 100644 index 0000000..bc56ab8 --- /dev/null +++ b/post-training/LLaMA-Factory/.dockerignore @@ -0,0 +1,15 @@ +.vscode +.git +.github +.venv +cache +data +docker +saves +hf_cache +ms_cache +om_cache +output +.dockerignore +.gitattributes +.gitignore diff --git a/post-training/LLaMA-Factory/.gitattributes b/post-training/LLaMA-Factory/.gitattributes new file mode 100644 index 0000000..dfe0770 --- /dev/null +++ b/post-training/LLaMA-Factory/.gitattributes @@ -0,0 +1,2 @@ +# Auto detect text files and perform LF normalization +* text=auto diff --git a/post-training/LLaMA-Factory/.github/CODE_OF_CONDUCT.md b/post-training/LLaMA-Factory/.github/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..c2035ce --- /dev/null +++ b/post-training/LLaMA-Factory/.github/CODE_OF_CONDUCT.md @@ -0,0 +1,128 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +`hoshihiyouga AT gmail DOT com`. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. 
+ +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. diff --git a/post-training/LLaMA-Factory/.github/CONTRIBUTING.md b/post-training/LLaMA-Factory/.github/CONTRIBUTING.md new file mode 100644 index 0000000..507d666 --- /dev/null +++ b/post-training/LLaMA-Factory/.github/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# Contributing to LLaMA Factory + +Everyone is welcome to contribute, and we value everybody's contribution. Code contributions are not the only way to help the community. Answering questions, helping others, and improving the documentation are also immensely valuable. + +It also helps us if you spread the word! Reference the library in blog posts about the awesome projects it made possible, shout out on Twitter every time it has helped you, or simply ⭐️ the repository to say thank you. + +However you choose to contribute, please be mindful and respect our [code of conduct](CODE_OF_CONDUCT.md). + +**This guide was heavily inspired by [transformers guide to contributing](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md).** + +## Ways to contribute + +There are several ways you can contribute to LLaMA Factory: + +* Fix outstanding issues with the existing code. +* Submit issues related to bugs or desired new features. +* Contribute to the examples or to the documentation. + +### Style guide + +LLaMA Factory follows the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html), check it for details. + +### Create a Pull Request + +1. Fork the [repository](https://github.com/hiyouga/LLaMA-Factory) by clicking on the [Fork](https://github.com/hiyouga/LLaMA-Factory/fork) button on the repository's page. This creates a copy of the code under your GitHub user account. + +2. Clone your fork to your local disk, and add the base repository as a remote: + +```bash +git clone git@github.com:[username]/LLaMA-Factory.git +cd LLaMA-Factory +git remote add upstream https://github.com/hiyouga/LLaMA-Factory.git +``` + +3. Create a new branch to hold your development changes: + +```bash +git checkout -b dev_your_branch +``` + +4. 
Set up a development environment by running the following command in a virtual environment: + +```bash +pip install -e ".[dev]" +``` + +If LLaMA Factory was already installed in the virtual environment, remove it with `pip uninstall llamafactory` before reinstalling it in editable mode with the -e flag. + +5. Check code before commit: + +```bash +make commit +make style && make quality +make test +``` + +6. Submit changes: + +```bash +git add . +git commit -m "commit message" +git fetch upstream +git rebase upstream/main +git push -u origin dev_your_branch +``` + +7. Create a merge request from your branch `dev_your_branch` at [origin repo](https://github.com/hiyouga/LLaMA-Factory). diff --git a/post-training/LLaMA-Factory/.github/ISSUE_TEMPLATE/1-bug-report.yml b/post-training/LLaMA-Factory/.github/ISSUE_TEMPLATE/1-bug-report.yml new file mode 100644 index 0000000..a08596f --- /dev/null +++ b/post-training/LLaMA-Factory/.github/ISSUE_TEMPLATE/1-bug-report.yml @@ -0,0 +1,61 @@ +name: "\U0001F41B Bug / help" +description: Create a report to help us improve the LLaMA Factory +labels: ["bug", "pending"] +body: + - type: markdown + attributes: + value: | + Issues included in **[FAQs](https://github.com/hiyouga/LLaMA-Factory/issues/4614)** or those with **insufficient** information may be closed without a response. + 已经包含在 **[常见问题](https://github.com/hiyouga/LLaMA-Factory/issues/4614)** 内或提供信息**不完整**的 issues 可能不会被回复。 + + - type: markdown + attributes: + value: | + Please do not create issues that are not related to framework bugs under this category, use **[Discussions](https://github.com/hiyouga/LLaMA-Factory/discussions/categories/q-a)** instead. + 请勿在此分类下创建和框架 bug 无关的 issues,训练问题求助请使用 **[讨论区](https://github.com/hiyouga/LLaMA-Factory/discussions/categories/q-a)**。 + + - type: checkboxes + id: reminder + attributes: + label: Reminder + description: | + Please ensure you have read the above rules carefully and searched the existing issues (including FAQs). + 请确保您已经认真阅读了上述规则并且搜索过现有的 issues(包括常见问题)。 + + options: + - label: I have read the above rules and searched the existing issues. + required: true + + - type: textarea + id: system-info + validations: + required: true + attributes: + label: System Info + description: | + Please share your system info with us. You can run the command **llamafactory-cli env** and copy-paste its output below. + 请提供您的系统信息。您可以在命令行运行 **llamafactory-cli env** 并将其输出复制到该文本框中。 + + placeholder: llamafactory version, platform, python version, ... + + - type: textarea + id: reproduction + validations: + required: true + attributes: + label: Reproduction + description: | + Please provide entry arguments, error messages and stack traces that reproduces the problem. + 请提供入口参数,错误日志以及异常堆栈以便于我们复现问题。 + + value: | + ```text + Put your message here. + ``` + + - type: textarea + id: others + validations: + required: false + attributes: + label: Others diff --git a/post-training/LLaMA-Factory/.github/ISSUE_TEMPLATE/2-feature-request.yml b/post-training/LLaMA-Factory/.github/ISSUE_TEMPLATE/2-feature-request.yml new file mode 100644 index 0000000..5d72271 --- /dev/null +++ b/post-training/LLaMA-Factory/.github/ISSUE_TEMPLATE/2-feature-request.yml @@ -0,0 +1,41 @@ +name: "\U0001F680 Feature request" +description: Submit a request for a new feature +labels: ["enhancement", "pending"] +body: + - type: markdown + attributes: + value: | + Please do not create issues that are not related to new features under this category. 
+ 请勿在此分类下创建和新特性无关的 issues。 + + - type: checkboxes + id: reminder + attributes: + label: Reminder + description: | + Please ensure you have read the above rules carefully and searched the existing issues. + 请确保您已经认真阅读了上述规则并且搜索过现有的 issues。 + + options: + - label: I have read the above rules and searched the existing issues. + required: true + + - type: textarea + id: description + validations: + required: true + attributes: + label: Description + description: | + A clear and concise description of the feature proposal. + 请详细描述您希望加入的新功能特性。 + + - type: textarea + id: contribution + validations: + required: false + attributes: + label: Pull Request + description: | + Have you already created the relevant PR and submitted the code? + 您是否已经创建了相关 PR 并提交了代码? diff --git a/post-training/LLaMA-Factory/.github/ISSUE_TEMPLATE/config.yml b/post-training/LLaMA-Factory/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..3ba13e0 --- /dev/null +++ b/post-training/LLaMA-Factory/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1 @@ +blank_issues_enabled: false diff --git a/post-training/LLaMA-Factory/.github/PULL_REQUEST_TEMPLATE.md b/post-training/LLaMA-Factory/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..d23d6be --- /dev/null +++ b/post-training/LLaMA-Factory/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,8 @@ +# What does this PR do? + +Fixes # (issue) + +## Before submitting + +- [ ] Did you read the [contributor guideline](https://github.com/hiyouga/LLaMA-Factory/blob/main/.github/CONTRIBUTING.md)? +- [ ] Did you write any new necessary tests? diff --git a/post-training/LLaMA-Factory/.github/SECURITY.md b/post-training/LLaMA-Factory/.github/SECURITY.md new file mode 100644 index 0000000..d34728e --- /dev/null +++ b/post-training/LLaMA-Factory/.github/SECURITY.md @@ -0,0 +1,7 @@ +# Reporting Security Issues + +To report a security issue, please use the GitHub Security Advisory ["Report a Vulnerability"](https://github.com/hiyouga/LLaMA-Factory/security/advisories/new) tab. + +We will send a response indicating the next steps in handling your report. After the initial reply to your report, the security team will keep you informed of the progress towards a fix and full announcement, and may ask for additional information or guidance. + +Report security bugs in third-party modules to the person or team maintaining the module. 
diff --git a/post-training/LLaMA-Factory/.github/workflows/label_issue.yml b/post-training/LLaMA-Factory/.github/workflows/label_issue.yml new file mode 100644 index 0000000..b7469f6 --- /dev/null +++ b/post-training/LLaMA-Factory/.github/workflows/label_issue.yml @@ -0,0 +1,32 @@ +name: label_issue + +on: + issues: + types: + - opened + +jobs: + label_issue: + runs-on: ubuntu-latest + + permissions: + issues: write + + steps: + - env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + ISSUE_URL: ${{ github.event.issue.html_url }} + ISSUE_TITLE: ${{ github.event.issue.title }} + run: | + LABEL="" + NPU_KEYWORDS=(npu huawei ascend 华为 昇腾) + ISSUE_TITLE_LOWER=$(echo $ISSUE_TITLE | tr '[:upper:]' '[:lower:]') + for KEYWORD in ${NPU_KEYWORDS[@]}; do + if [[ $ISSUE_TITLE_LOWER == *$KEYWORD* ]] && [[ $ISSUE_TITLE_LOWER != *input* ]]; then + LABEL="npu" + break + fi + done + if [ -n "$LABEL" ]; then + gh issue edit $ISSUE_URL --add-label $LABEL + fi diff --git a/post-training/LLaMA-Factory/.github/workflows/publish.yml b/post-training/LLaMA-Factory/.github/workflows/publish.yml new file mode 100644 index 0000000..c3f729a --- /dev/null +++ b/post-training/LLaMA-Factory/.github/workflows/publish.yml @@ -0,0 +1,36 @@ +name: publish + +on: + workflow_dispatch: + release: + types: + - published + +jobs: + publish: + name: Upload release to PyPI + + runs-on: ubuntu-latest + + environment: + name: release + url: https://pypi.org/p/llamafactory + + permissions: + id-token: write + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Build package + run: | + make build + + - name: Publish package + uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/post-training/LLaMA-Factory/.github/workflows/tests.yml b/post-training/LLaMA-Factory/.github/workflows/tests.yml new file mode 100644 index 0000000..84920f4 --- /dev/null +++ b/post-training/LLaMA-Factory/.github/workflows/tests.yml @@ -0,0 +1,99 @@ +name: tests + +on: + workflow_dispatch: + push: + branches: + - "main" + paths: + - "**.py" + - "requirements.txt" + - ".github/workflows/*.yml" + pull_request: + branches: + - "main" + paths: + - "**.py" + - "requirements.txt" + - ".github/workflows/*.yml" + +jobs: + tests: + strategy: + fail-fast: false + matrix: + python: + - "3.9" + - "3.10" + - "3.11" + - "3.12" + os: + - "ubuntu-latest" + - "windows-latest" + - "macos-13" + transformers: + - null + include: # test backward compatibility + - python: "3.9" + os: "ubuntu-latest" + transformers: "4.45.0" + - python: "3.9" + os: "ubuntu-latest" + transformers: "4.49.0" + + runs-on: ${{ matrix.os }} + + concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.os }}-${{ matrix.python }}-${{ matrix.transformers }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + + env: + HF_TOKEN: ${{ secrets.HF_TOKEN }} + OS_NAME: ${{ matrix.os }} + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + cache: "pip" + cache-dependency-path: "**/requirements*.txt" + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install ".[torch,dev]" + + - name: Install transformers + if: ${{ matrix.transformers }} + run: | + python -m pip install "transformers==${{ matrix.transformers }}" + + - name: Cache files + id: hf-hub-cache + uses: actions/cache@v4 + with: + path: ${{ runner.temp }}/huggingface + key: 
huggingface-${{ matrix.os }}-${{ matrix.python }}-${{ matrix.transformers }}-${{ hashFiles('tests/version.txt') }} + + - name: Check quality + run: | + make style && make quality + + - name: Check license + run: | + make license + + - name: Check build + run: | + make build + + - name: Test with pytest + run: | + make test + env: + HF_HOME: ${{ runner.temp }}/huggingface + HF_HUB_OFFLINE: "${{ steps.hf-hub-cache.outputs.cache-hit == 'true' && '1' || '0' }}" diff --git a/post-training/LLaMA-Factory/.gitignore b/post-training/LLaMA-Factory/.gitignore new file mode 100644 index 0000000..8397a78 --- /dev/null +++ b/post-training/LLaMA-Factory/.gitignore @@ -0,0 +1,177 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +.idea/ + +# vscode +.vscode/ + +# uv +uv.lock + +# custom .gitignore +ms_cache/ +hf_cache/ +om_cache/ +cache/ +config/ +output/ +wandb/ +swanlog/ +generated_predictions.jsonl diff --git a/post-training/LLaMA-Factory/.pre-commit-config.yaml b/post-training/LLaMA-Factory/.pre-commit-config.yaml new file mode 100644 index 0000000..cbe361e --- /dev/null +++ b/post-training/LLaMA-Factory/.pre-commit-config.yaml @@ -0,0 +1,28 @@ +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: check-ast + - id: check-added-large-files + args: ['--maxkb=25000'] + - id: check-merge-conflict + - id: check-yaml + - id: debug-statements + - id: end-of-file-fixer + - id: trailing-whitespace + args: [--markdown-linebreak-ext=md] + - id: no-commit-to-branch + args: ['--branch', 'main'] + +- repo: https://github.com/asottile/pyupgrade + rev: v3.17.0 + hooks: + - id: pyupgrade + args: [--py38-plus] + +- repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.6.9 + hooks: + - id: ruff + args: [--fix] + - id: ruff-format diff --git a/post-training/LLaMA-Factory/CITATION.cff b/post-training/LLaMA-Factory/CITATION.cff new file mode 100644 index 0000000..01b4c9f --- /dev/null +++ b/post-training/LLaMA-Factory/CITATION.cff @@ -0,0 +1,44 @@ +cff-version: 1.2.0 +date-released: 2024-03 +message: "If you use this software, please cite it as below." 
+authors: +- family-names: "Zheng" + given-names: "Yaowei" +- family-names: "Zhang" + given-names: "Richong" +- family-names: "Zhang" + given-names: "Junhao" +- family-names: "Ye" + given-names: "Yanhan" +- family-names: "Luo" + given-names: "Zheyan" +- family-names: "Feng" + given-names: "Zhangchi" +- family-names: "Ma" + given-names: "Yongqiang" +title: "LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models" +url: "https://arxiv.org/abs/2403.13372" +preferred-citation: + type: conference-paper + conference: + name: "Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)" + authors: + - family-names: "Zheng" + given-names: "Yaowei" + - family-names: "Zhang" + given-names: "Richong" + - family-names: "Zhang" + given-names: "Junhao" + - family-names: "Ye" + given-names: "Yanhan" + - family-names: "Luo" + given-names: "Zheyan" + - family-names: "Feng" + given-names: "Zhangchi" + - family-names: "Ma" + given-names: "Yongqiang" + title: "LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models" + url: "https://arxiv.org/abs/2403.13372" + year: 2024 + publisher: "Association for Computational Linguistics" + address: "Bangkok, Thailand" diff --git a/post-training/LLaMA-Factory/LICENSE b/post-training/LLaMA-Factory/LICENSE new file mode 100644 index 0000000..b09cd78 --- /dev/null +++ b/post-training/LLaMA-Factory/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/post-training/LLaMA-Factory/MANIFEST.in b/post-training/LLaMA-Factory/MANIFEST.in new file mode 100644 index 0000000..82c51f6 --- /dev/null +++ b/post-training/LLaMA-Factory/MANIFEST.in @@ -0,0 +1 @@ +include LICENSE requirements.txt diff --git a/post-training/LLaMA-Factory/Makefile b/post-training/LLaMA-Factory/Makefile new file mode 100644 index 0000000..2dcb7ca --- /dev/null +++ b/post-training/LLaMA-Factory/Makefile @@ -0,0 +1,24 @@ +.PHONY: build commit license quality style test + +check_dirs := scripts src tests setup.py + +build: + pip3 install build && python3 -m build + +commit: + pre-commit install + pre-commit run --all-files + +license: + python3 tests/check_license.py $(check_dirs) + +quality: + ruff check $(check_dirs) + ruff format --check $(check_dirs) + +style: + ruff check $(check_dirs) --fix + ruff format $(check_dirs) + +test: + CUDA_VISIBLE_DEVICES= WANDB_DISABLED=true pytest -vv tests/ diff --git a/post-training/LLaMA-Factory/README.md b/post-training/LLaMA-Factory/README.md new file mode 100644 index 0000000..05fdd97 --- /dev/null +++ b/post-training/LLaMA-Factory/README.md @@ -0,0 +1,915 @@ +![# LLaMA Factory](assets/logo.png) + +[![GitHub Repo stars](https://img.shields.io/github/stars/hiyouga/LLaMA-Factory?style=social)](https://github.com/hiyouga/LLaMA-Factory/stargazers) +[![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Factory)](https://github.com/hiyouga/LLaMA-Factory/commits/main) +[![GitHub contributors](https://img.shields.io/github/contributors/hiyouga/LLaMA-Factory?color=orange)](https://github.com/hiyouga/LLaMA-Factory/graphs/contributors) +[![GitHub workflow](https://github.com/hiyouga/LLaMA-Factory/actions/workflows/tests.yml/badge.svg)](https://github.com/hiyouga/LLaMA-Factory/actions/workflows/tests.yml) +[![PyPI](https://img.shields.io/pypi/v/llamafactory)](https://pypi.org/project/llamafactory/) +[![Citation](https://img.shields.io/badge/citation-392-green)](https://scholar.google.com/scholar?cites=12620864006390196564) +[![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Factory/pulls) + +[![Twitter](https://img.shields.io/twitter/follow/llamafactory_ai)](https://twitter.com/llamafactory_ai) +[![Discord](https://dcbadge.vercel.app/api/server/rKfvV9r9FK?compact=true&style=flat)](https://discord.gg/rKfvV9r9FK) +[![GitCode](https://gitcode.com/zhengyaowei/LLaMA-Factory/star/badge.svg)](https://gitcode.com/zhengyaowei/LLaMA-Factory) + +[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing) +[![Open in DSW](https://gallery.pai-ml.com/assets/open-in-dsw.svg)](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory) +[![Spaces](https://img.shields.io/badge/🤗-Open%20in%20Spaces-blue)](https://huggingface.co/spaces/hiyouga/LLaMA-Board) +[![Studios](https://img.shields.io/badge/ModelScope-Open%20in%20Studios-blue)](https://modelscope.cn/studios/hiyouga/LLaMA-Board) +[![SageMaker](https://img.shields.io/badge/SageMaker-Open%20in%20AWS-blue)](https://aws.amazon.com/cn/blogs/china/a-one-stop-code-free-model-fine-tuning-deployment-platform-based-on-sagemaker-and-llama-factory/) + +

+Easily fine-tune 100+ large language models with zero-code CLI and Web UI
+
+[Github trend badge]
+ +👋 Join our [WeChat](assets/wechat.jpg) or [NPU user group](assets/wechat_npu.jpg). + +\[ English | [中文](README_zh.md) \] + +**Fine-tuning a large language model can be easy as...** + +https://github.com/user-attachments/assets/3991a3a8-4276-4d30-9cab-4cb0c4b9b99e + +Choose your path: + +- **Documentation**: https://llamafactory.readthedocs.io/en/latest/ +- **Colab (free)**: https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing +- **Local machine**: Please refer to [usage](#getting-started) +- **PAI-DSW (free trial)**: [Llama3 Example](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory) | [Qwen2-VL Example](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_qwen2vl) | [DeepSeek-R1-Distill Example](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_deepseek_r1_distill_7b) +- **Amazon SageMaker**: [Blog](https://aws.amazon.com/cn/blogs/china/a-one-stop-code-free-model-fine-tuning-deployment-platform-based-on-sagemaker-and-llama-factory/) +- **Easy Dataset**: [Fine-tune on Synthetic Data](https://buaa-act.feishu.cn/wiki/GVzlwYcRFiR8OLkHbL6cQpYin7g) + +> [!NOTE] +> Except for the above links, all other websites are unauthorized third-party websites. Please carefully use them. + +## Table of Contents + +- [Features](#features) +- [Benchmark](#benchmark) +- [Changelog](#changelog) +- [Supported Models](#supported-models) +- [Supported Training Approaches](#supported-training-approaches) +- [Provided Datasets](#provided-datasets) +- [Requirement](#requirement) +- [Getting Started](#getting-started) + - [Installation](#installation) + - [Data Preparation](#data-preparation) + - [Quickstart](#quickstart) + - [Fine-Tuning with LLaMA Board GUI](#fine-tuning-with-llama-board-gui-powered-by-gradio) + - [Build Docker](#build-docker) + - [Deploy with OpenAI-style API and vLLM](#deploy-with-openai-style-api-and-vllm) + - [Download from ModelScope Hub](#download-from-modelscope-hub) + - [Download from Modelers Hub](#download-from-modelers-hub) + - [Use W&B Logger](#use-wb-logger) + - [Use SwanLab Logger](#use-swanlab-logger) +- [Projects using LLaMA Factory](#projects-using-llama-factory) +- [License](#license) +- [Citation](#citation) +- [Acknowledgement](#acknowledgement) + +## Features + +- **Various models**: LLaMA, LLaVA, Mistral, Mixtral-MoE, Qwen, Qwen2-VL, DeepSeek, Yi, Gemma, ChatGLM, Phi, etc. +- **Integrated methods**: (Continuous) pre-training, (multimodal) supervised fine-tuning, reward modeling, PPO, DPO, KTO, ORPO, etc. +- **Scalable resources**: 16-bit full-tuning, freeze-tuning, LoRA and 2/3/4/5/6/8-bit QLoRA via AQLM/AWQ/GPTQ/LLM.int8/HQQ/EETQ. +- **Advanced algorithms**: [GaLore](https://github.com/jiaweizzhao/GaLore), [BAdam](https://github.com/Ledzy/BAdam), [APOLLO](https://github.com/zhuhanqing/APOLLO), [Adam-mini](https://github.com/zyushun/Adam-mini), DoRA, LongLoRA, LLaMA Pro, Mixture-of-Depths, LoRA+, LoftQ and PiSSA. +- **Practical tricks**: [FlashAttention-2](https://github.com/Dao-AILab/flash-attention), [Unsloth](https://github.com/unslothai/unsloth), [Liger Kernel](https://github.com/linkedin/Liger-Kernel), RoPE scaling, NEFTune and rsLoRA. +- **Wide tasks**: Multi-turn dialogue, tool using, image understanding, visual grounding, video recognition, audio understanding, etc. +- **Experiment monitors**: LlamaBoard, TensorBoard, Wandb, MLflow, [SwanLab](https://github.com/SwanHubX/SwanLab), etc. 
+- **Faster inference**: OpenAI-style API, Gradio UI and CLI with [vLLM worker](https://github.com/vllm-project/vllm) or [SGLang worker](https://github.com/sgl-project/sglang). + +### Day-N Support for Fine-Tuning Cutting-Edge Models + +| Support Date | Model Name | +| ------------ | ------------------------------------------------------------ | +| Day 0 | Qwen2.5 / Qwen2.5-VL / Gemma 3 / InternLM 3 / MiniCPM-o-2.6 | +| Day 1 | Llama 3 / GLM-4 / Mistral Small / PaliGemma2 / Llama 4 | + +## Benchmark + +Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning), LLaMA Factory's LoRA tuning offers up to **3.7 times faster** training speed with a better Rouge score on the advertising text generation task. By leveraging 4-bit quantization technique, LLaMA Factory's QLoRA further improves the efficiency regarding the GPU memory. + +![benchmark](assets/benchmark.svg) + +
Definitions + +- **Training Speed**: the number of training samples processed per second during the training. (bs=4, cutoff_len=1024) +- **Rouge Score**: Rouge-2 score on the development set of the [advertising text generation](https://aclanthology.org/D19-1321.pdf) task. (bs=4, cutoff_len=1024) +- **GPU Memory**: Peak GPU memory usage in 4-bit quantized training. (bs=1, cutoff_len=1024) +- We adopt `pre_seq_len=128` for ChatGLM's P-Tuning and `lora_rank=32` for LLaMA Factory's LoRA tuning. + +
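The benchmark setting above corresponds to the YAML arguments consumed by `llamafactory-cli train`. A minimal sketch of a comparable LoRA run is shown below; the model and dataset names are placeholders, and the exact argument set should be checked against the configs under `examples/`:

```yaml
### model (placeholder checkpoint; any supported model works)
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct

### method: LoRA-based supervised fine-tuning, mirroring the benchmark setting
stage: sft
do_train: true
finetuning_type: lora
lora_rank: 32
lora_target: all

### dataset (placeholder name registered in data/dataset_info.json)
dataset: alpaca_en_demo
template: llama3
cutoff_len: 1024

### training
output_dir: saves/llama3-8b/lora/sft
per_device_train_batch_size: 4
gradient_accumulation_steps: 1
learning_rate: 1.0e-4
num_train_epochs: 3.0
```

Saved as, e.g., `my_lora_sft.yaml` (hypothetical file name), it is launched with `llamafactory-cli train my_lora_sft.yaml`.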
+ +## Changelog + +[25/04/16] We supported fine-tuning the **[InternVL3](https://huggingface.co/OpenGVLab/InternVL3-8B)** model. See [PR #7258](https://github.com/hiyouga/LLaMA-Factory/pull/7258) to get started. + +[25/04/14] We supported fine-tuning the **[GLM-Z1](https://huggingface.co/THUDM/GLM-Z1-9B-0414)** and **[Kimi-VL](https://huggingface.co/moonshotai/Kimi-VL-A3B-Instruct)** models. + +[25/04/06] We supported fine-tuning the **[Llama 4](https://ai.meta.com/blog/llama-4-multimodal-intelligence/)** model. See [PR #7611](https://github.com/hiyouga/LLaMA-Factory/pull/7611) to get started. + +[25/03/31] We supported fine-tuning the **[Qwen2.5 Omni](https://qwenlm.github.io/blog/qwen2.5-omni/)** model. See [PR #7537](https://github.com/hiyouga/LLaMA-Factory/pull/7537) to get started. + +
Full Changelog + +[25/03/15] We supported **[SGLang](https://github.com/sgl-project/sglang)** as inference backend. Try `infer_backend: sglang` to accelerate inference. + +[25/03/12] We supported fine-tuning the **[Gemma 3](https://huggingface.co/blog/gemma3)** model. + +[25/02/24] Announcing **[EasyR1](https://github.com/hiyouga/EasyR1)**, an efficient, scalable and multi-modality RL training framework for efficient GRPO training. + +[25/02/11] We supported saving the **[Ollama](https://github.com/ollama/ollama)** modelfile when exporting the model checkpoints. See [examples](examples/README.md) for usage. + +[25/02/05] We supported fine-tuning the **[Qwen2-Audio](Qwen/Qwen2-Audio-7B-Instruct)** and **[MiniCPM-o-2.6](https://huggingface.co/openbmb/MiniCPM-o-2_6)** on audio understanding tasks. + +[25/01/31] We supported fine-tuning the **[DeepSeek-R1](https://huggingface.co/deepseek-ai/DeepSeek-R1)** and **[Qwen2.5-VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct)** models. + +[25/01/15] We supported **[APOLLO](https://arxiv.org/abs/2412.05270)** optimizer. See [examples](examples/README.md) for usage. + +[25/01/14] We supported fine-tuning the **[MiniCPM-o-2.6](https://huggingface.co/openbmb/MiniCPM-o-2_6)** and **[MiniCPM-V-2.6](https://huggingface.co/openbmb/MiniCPM-V-2_6)** models. Thank [@BUAADreamer](https://github.com/BUAADreamer)'s PR. + +[25/01/14] We supported fine-tuning the **[InternLM 3](https://huggingface.co/collections/internlm/)** models. Thank [@hhaAndroid](https://github.com/hhaAndroid)'s PR. + +[25/01/10] We supported fine-tuning the **[Phi-4](https://huggingface.co/microsoft/phi-4)** model. + +[24/12/21] We supported using **[SwanLab](https://github.com/SwanHubX/SwanLab)** for experiment tracking and visualization. See [this section](#use-swanlab-logger) for details. + +[24/11/27] We supported fine-tuning the **[Skywork-o1](https://huggingface.co/Skywork/Skywork-o1-Open-Llama-3.1-8B)** model and the **[OpenO1](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT)** dataset. + +[24/10/09] We supported downloading pre-trained models and datasets from the **[Modelers Hub](https://modelers.cn/models)**. See [this tutorial](#download-from-modelers-hub) for usage. + +[24/09/19] We supported fine-tuning the **[Qwen2.5](https://qwenlm.github.io/blog/qwen2.5/)** models. + +[24/08/30] We supported fine-tuning the **[Qwen2-VL](https://qwenlm.github.io/blog/qwen2-vl/)** models. Thank [@simonJJJ](https://github.com/simonJJJ)'s PR. + +[24/08/27] We supported **[Liger Kernel](https://github.com/linkedin/Liger-Kernel)**. Try `enable_liger_kernel: true` for efficient training. + +[24/08/09] We supported **[Adam-mini](https://github.com/zyushun/Adam-mini)** optimizer. See [examples](examples/README.md) for usage. Thank [@relic-yuexi](https://github.com/relic-yuexi)'s PR. + +[24/07/04] We supported [contamination-free packed training](https://github.com/MeetKai/functionary/tree/main/functionary/train/packing). Use `neat_packing: true` to activate it. Thank [@chuan298](https://github.com/chuan298)'s PR. + +[24/06/16] We supported **[PiSSA](https://arxiv.org/abs/2404.02948)** algorithm. See [examples](examples/README.md) for usage. + +[24/06/07] We supported fine-tuning the **[Qwen2](https://qwenlm.github.io/blog/qwen2/)** and **[GLM-4](https://github.com/THUDM/GLM-4)** models. + +[24/05/26] We supported **[SimPO](https://arxiv.org/abs/2405.14734)** algorithm for preference learning. See [examples](examples/README.md) for usage. 
+ +[24/05/20] We supported fine-tuning the **PaliGemma** series models. Note that the PaliGemma models are pre-trained models, you need to fine-tune them with `paligemma` template for chat completion. + +[24/05/18] We supported **[KTO](https://arxiv.org/abs/2402.01306)** algorithm for preference learning. See [examples](examples/README.md) for usage. + +[24/05/14] We supported training and inference on the Ascend NPU devices. Check [installation](#installation) section for details. + +[24/04/26] We supported fine-tuning the **LLaVA-1.5** multimodal LLMs. See [examples](examples/README.md) for usage. + +[24/04/22] We provided a **[Colab notebook](https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing)** for fine-tuning the Llama-3 model on a free T4 GPU. Two Llama-3-derived models fine-tuned using LLaMA Factory are available at Hugging Face, check [Llama3-8B-Chinese-Chat](https://huggingface.co/shenzhi-wang/Llama3-8B-Chinese-Chat) and [Llama3-Chinese](https://huggingface.co/zhichen/Llama3-Chinese) for details. + +[24/04/21] We supported **[Mixture-of-Depths](https://arxiv.org/abs/2404.02258)** according to [AstraMindAI's implementation](https://github.com/astramind-ai/Mixture-of-depths). See [examples](examples/README.md) for usage. + +[24/04/16] We supported **[BAdam](https://arxiv.org/abs/2404.02827)** optimizer. See [examples](examples/README.md) for usage. + +[24/04/16] We supported **[unsloth](https://github.com/unslothai/unsloth)**'s long-sequence training (Llama-2-7B-56k within 24GB). It achieves **117%** speed and **50%** memory compared with FlashAttention-2, more benchmarks can be found in [this page](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison). + +[24/03/31] We supported **[ORPO](https://arxiv.org/abs/2403.07691)**. See [examples](examples/README.md) for usage. + +[24/03/21] Our paper "[LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models](https://arxiv.org/abs/2403.13372)" is available at arXiv! + +[24/03/20] We supported **FSDP+QLoRA** that fine-tunes a 70B model on 2x24GB GPUs. See [examples](examples/README.md) for usage. + +[24/03/13] We supported **[LoRA+](https://arxiv.org/abs/2402.12354)**. See [examples](examples/README.md) for usage. + +[24/03/07] We supported **[GaLore](https://arxiv.org/abs/2403.03507)** optimizer. See [examples](examples/README.md) for usage. + +[24/03/07] We integrated **[vLLM](https://github.com/vllm-project/vllm)** for faster and concurrent inference. Try `infer_backend: vllm` to enjoy **270%** inference speed. + +[24/02/28] We supported weight-decomposed LoRA (**[DoRA](https://arxiv.org/abs/2402.09353)**). Try `use_dora: true` to activate DoRA training. + +[24/02/15] We supported **block expansion** proposed by [LLaMA Pro](https://github.com/TencentARC/LLaMA-Pro). See [examples](examples/README.md) for usage. + +[24/02/05] Qwen1.5 (Qwen2 beta version) series models are supported in LLaMA-Factory. Check this [blog post](https://qwenlm.github.io/blog/qwen1.5/) for details. + +[24/01/18] We supported **agent tuning** for most models, equipping model with tool using abilities by fine-tuning with `dataset: glaive_toolcall_en`. + +[23/12/23] We supported **[unsloth](https://github.com/unslothai/unsloth)**'s implementation to boost LoRA tuning for the LLaMA, Mistral and Yi models. Try `use_unsloth: true` argument to activate unsloth patch. It achieves **170%** speed in our benchmark, check [this page](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison) for details. 
+ +[23/12/12] We supported fine-tuning the latest MoE model **[Mixtral 8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)** in our framework. See hardware requirement [here](#hardware-requirement). + +[23/12/01] We supported downloading pre-trained models and datasets from the **[ModelScope Hub](https://modelscope.cn/models)**. See [this tutorial](#download-from-modelscope-hub) for usage. + +[23/10/21] We supported **[NEFTune](https://arxiv.org/abs/2310.05914)** trick for fine-tuning. Try `neftune_noise_alpha: 5` argument to activate NEFTune. + +[23/09/27] We supported **$S^2$-Attn** proposed by [LongLoRA](https://github.com/dvlab-research/LongLoRA) for the LLaMA models. Try `shift_attn: true` argument to enable shift short attention. + +[23/09/23] We integrated MMLU, C-Eval and CMMLU benchmarks in this repo. See [examples](examples/README.md) for usage. + +[23/09/10] We supported **[FlashAttention-2](https://github.com/Dao-AILab/flash-attention)**. Try `flash_attn: fa2` argument to enable FlashAttention-2 if you are using RTX4090, A100 or H100 GPUs. + +[23/08/12] We supported **RoPE scaling** to extend the context length of the LLaMA models. Try `rope_scaling: linear` argument in training and `rope_scaling: dynamic` argument at inference to extrapolate the position embeddings. + +[23/08/11] We supported **[DPO training](https://arxiv.org/abs/2305.18290)** for instruction-tuned models. See [examples](examples/README.md) for usage. + +[23/07/31] We supported **dataset streaming**. Try `streaming: true` and `max_steps: 10000` arguments to load your dataset in streaming mode. + +[23/07/29] We released two instruction-tuned 13B models at Hugging Face. See these Hugging Face Repos ([LLaMA-2](https://huggingface.co/hiyouga/Llama-2-Chinese-13b-chat) / [Baichuan](https://huggingface.co/hiyouga/Baichuan-13B-sft)) for details. + +[23/07/18] We developed an **all-in-one Web UI** for training, evaluation and inference. Try `train_web.py` to fine-tune models in your Web browser. Thank [@KanadeSiina](https://github.com/KanadeSiina) and [@codemayq](https://github.com/codemayq) for their efforts in the development. + +[23/07/09] We released **[FastEdit](https://github.com/hiyouga/FastEdit)** ⚡🩹, an easy-to-use package for editing the factual knowledge of large language models efficiently. Please follow [FastEdit](https://github.com/hiyouga/FastEdit) if you are interested. + +[23/06/29] We provided a **reproducible example** of training a chat model using instruction-following datasets, see [Baichuan-7B-sft](https://huggingface.co/hiyouga/Baichuan-7B-sft) for details. + +[23/06/22] We aligned the [demo API](src/api_demo.py) with the [OpenAI's](https://platform.openai.com/docs/api-reference/chat) format where you can insert the fine-tuned model in **arbitrary ChatGPT-based applications**. + +[23/06/03] We supported quantized training and inference (aka **[QLoRA](https://github.com/artidoro/qlora)**). See [examples](examples/README.md) for usage. + +
+ +## Supported Models + +| Model | Model size | Template | +| ----------------------------------------------------------------- | -------------------------------- | ------------------- | +| [Baichuan 2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 | +| [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - | +| [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 | +| [Command R](https://huggingface.co/CohereForAI) | 35B/104B | cohere | +| [DeepSeek (Code/MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek | +| [DeepSeek 2.5/3](https://huggingface.co/deepseek-ai) | 236B/671B | deepseek3 | +| [DeepSeek R1 (Distill)](https://huggingface.co/deepseek-ai) | 1.5B/7B/8B/14B/32B/70B/671B | deepseek3 | +| [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon | +| [Gemma/Gemma 2/CodeGemma](https://huggingface.co/google) | 2B/7B/9B/27B | gemma | +| [Gemma 3](https://huggingface.co/google) | 1B/4B/12B/27B | gemma3/gemma (1B) | +| [GLM-4/GLM-4-0414/GLM-Z1](https://huggingface.co/THUDM) | 9B/32B | glm4 | +| [GPT-2](https://huggingface.co/openai-community) | 0.1B/0.4B/0.8B/1.5B | - | +| [Granite 3.0-3.3](https://huggingface.co/ibm-granite) | 1B/2B/3B/8B | granite3 | +| [Hunyuan](https://huggingface.co/tencent/) | 7B | hunyuan | +| [Index](https://huggingface.co/IndexTeam) | 1.9B | index | +| [InternLM 2-3](https://huggingface.co/internlm) | 7B/8B/20B | intern2 | +| [InternVL 2.5-3](https://huggingface.co/OpenGVLab)\*\* | 1B/2B/4B/8B/9B/14B/26B/38B/78B | intern_vl | +| [Kimi-VL](https://huggingface.co/moonshotai) | 16B | kimi_vl | +| [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - | +| [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 | +| [Llama 3-3.3](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 | +| [Llama 4](https://huggingface.co/meta-llama) | 109B/402B | llama4 | +| [Llama 3.2 Vision](https://huggingface.co/meta-llama) | 11B/90B | mllama | +| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava | +| [LLaVA-NeXT](https://huggingface.co/llava-hf) | 7B/8B/13B/34B/72B/110B | llava_next | +| [LLaVA-NeXT-Video](https://huggingface.co/llava-hf) | 7B/34B | llava_next_video | +| [MiniCPM](https://huggingface.co/openbmb) | 1B/2B/4B | cpm/cpm3 | +| [MiniCPM-o-2.6/MiniCPM-V-2.6](https://huggingface.co/openbmb) | 8B | minicpm_o/minicpm_v | +| [Ministral/Mistral-Nemo](https://huggingface.co/mistralai) | 8B/12B | ministral | +| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral | +| [Mistral Small](https://huggingface.co/mistralai) | 24B | mistral_small | +| [OLMo](https://huggingface.co/allenai) | 1B/7B | - | +| [PaliGemma/PaliGemma2](https://huggingface.co/google) | 3B/10B/28B | paligemma | +| [Phi-1.5/Phi-2](https://huggingface.co/microsoft) | 1.3B/2.7B | - | +| [Phi-3/Phi-3.5](https://huggingface.co/microsoft) | 4B/14B | phi | +| [Phi-3-small](https://huggingface.co/microsoft) | 7B | phi_small | +| [Phi-4](https://huggingface.co/microsoft) | 14B | phi4 | +| [Pixtral](https://huggingface.co/mistralai) | 12B | pixtral | +| [Qwen (1-2.5) (Code/Math/MoE/QwQ)](https://huggingface.co/Qwen) | 0.5B/1.5B/3B/7B/14B/32B/72B/110B | qwen | +| [Qwen2-Audio](https://huggingface.co/Qwen) | 7B | qwen2_audio | +| [Qwen2.5-Omni](https://huggingface.co/Qwen)\*\* | 7B | qwen2_omni | +| [Qwen2-VL/Qwen2.5-VL/QVQ](https://huggingface.co/Qwen) | 2B/3B/7B/32B/72B | qwen2_vl | +| [Skywork o1](https://huggingface.co/Skywork) | 8B | skywork_o1 | +| [StarCoder 
2](https://huggingface.co/bigcode) | 3B/7B/15B | - | +| [TeleChat2](https://huggingface.co/Tele-AI) | 3B/7B/35B/115B | telechat2 | +| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse | +| [Yi/Yi-1.5 (Code)](https://huggingface.co/01-ai) | 1.5B/6B/9B/34B | yi | +| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | yi_vl | +| [Yuan 2](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan | + +> [!NOTE] +> For the "base" models, the `template` argument can be chosen from `default`, `alpaca`, `vicuna` etc. But make sure to use the **corresponding template** for the "instruct/chat" models. +> +> Remember to use the **SAME** template in training and inference. +> +> \*: You should install the `transformers` from main branch and use `DISABLE_VERSION_CHECK=1` to skip version check. +> +> \*\*: You need to install a specific version of `transformers` to use the corresponding model. + +Please refer to [constants.py](src/llamafactory/extras/constants.py) for a full list of models we supported. + +You also can add a custom chat template to [template.py](src/llamafactory/data/template.py). + +## Supported Training Approaches + +| Approach | Full-tuning | Freeze-tuning | LoRA | QLoRA | +| ---------------------- | ------------------ | ------------------ | ------------------ | ------------------ | +| Pre-Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +| Supervised Fine-Tuning | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +| Reward Modeling | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +| PPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +| DPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +| KTO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +| ORPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +| SimPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | + +> [!TIP] +> The implementation details of PPO can be found in [this blog](https://newfacade.github.io/notes-on-reinforcement-learning/17-ppo-trl.html). + +## Provided Datasets + +
Pre-training datasets + +- [Wiki Demo (en)](data/wiki_demo.txt) +- [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) +- [RedPajama V2 (en)](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-V2) +- [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220) +- [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered) +- [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile) +- [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B) +- [FineWeb (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb) +- [FineWeb-Edu (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu) +- [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack) +- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata) + +
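+To run continued pre-training on one of the corpora above, reference its registered name from `data/dataset_info.json` in a `pt`-stage config. A minimal sketch (the model, dataset name and output path below are illustrative; streaming is optional but useful for the larger corpora):
+
+```yaml
+### continued pre-training sketch (illustrative values)
+stage: pt
+do_train: true
+model_name_or_path: meta-llama/Llama-2-7b-hf
+dataset: wiki_demo            # a name registered in data/dataset_info.json
+cutoff_len: 1024
+streaming: true               # stream large corpora instead of materializing them
+max_steps: 10000              # required when streaming is enabled
+finetuning_type: lora
+output_dir: saves/llama2-7b/lora/pretrain
+```
+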
+ +
Supervised fine-tuning datasets + +- [Identity (en&zh)](data/identity.json) +- [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca) +- [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca-3) +- [Alpaca GPT4 (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM) +- [Glaive Function Calling V2 (en&zh)](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2) +- [LIMA (en)](https://huggingface.co/datasets/GAIR/lima) +- [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset) +- [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN) +- [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN) +- [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN) +- [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M) +- [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M) +- [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M) +- [UltraChat (en)](https://github.com/thunlp/UltraChat) +- [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus) +- [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k) +- [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT) +- [OpenOrca (en)](https://huggingface.co/datasets/Open-Orca/OpenOrca) +- [SlimOrca (en)](https://huggingface.co/datasets/Open-Orca/SlimOrca) +- [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct) +- [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M) +- [Wiki QA (en)](https://huggingface.co/datasets/wiki_qa) +- [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa) +- [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn) +- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar) +- [deepctrl (en&zh)](https://www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data) +- [Advertise Generating (zh)](https://huggingface.co/datasets/HasturOfficial/adgen) +- [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k) +- [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4) +- [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k) +- [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct) +- [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m) +- [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k) +- [Cosmopedia (en)](https://huggingface.co/datasets/HuggingFaceTB/cosmopedia) +- [STEM (zh)](https://huggingface.co/datasets/hfl/stem_zh_instruction) +- [Ruozhiba (zh)](https://huggingface.co/datasets/hfl/ruozhiba_gpt4_turbo) +- [Neo-sft (zh)](https://huggingface.co/datasets/m-a-p/neo_sft_phase2) +- [Magpie-Pro-300K-Filtered (en)](https://huggingface.co/datasets/Magpie-Align/Magpie-Pro-300K-Filtered) +- [Magpie-ultra-v0.1 (en)](https://huggingface.co/datasets/argilla/magpie-ultra-v0.1) +- [WebInstructSub (en)](https://huggingface.co/datasets/TIGER-Lab/WebInstructSub) +- [OpenO1-SFT (en&zh)](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT) +- [Open-Thoughts (en)](https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k) +- [Open-R1-Math (en)](https://huggingface.co/datasets/open-r1/OpenR1-Math-220k) +- [Chinese-DeepSeek-R1-Distill 
(zh)](https://huggingface.co/datasets/Congliu/Chinese-DeepSeek-R1-Distill-data-110k-SFT) +- [LLaVA mixed (en&zh)](https://huggingface.co/datasets/BUAADreamer/llava-en-zh-300k) +- [Pokemon-gpt4o-captions (en&zh)](https://huggingface.co/datasets/jugg1024/pokemon-gpt4o-captions) +- [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de) +- [Dolly 15k (de)](https://huggingface.co/datasets/mayflowergmbh/dolly-15k_de) +- [Alpaca GPT4 (de)](https://huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de) +- [OpenSchnabeltier (de)](https://huggingface.co/datasets/mayflowergmbh/openschnabeltier_de) +- [Evol Instruct (de)](https://huggingface.co/datasets/mayflowergmbh/evol-instruct_de) +- [Dolphin (de)](https://huggingface.co/datasets/mayflowergmbh/dolphin_de) +- [Booksum (de)](https://huggingface.co/datasets/mayflowergmbh/booksum_de) +- [Airoboros (de)](https://huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de) +- [Ultrachat (de)](https://huggingface.co/datasets/mayflowergmbh/ultra-chat_de) + +
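+To fine-tune on any of the datasets above, point the `dataset` field of your training config at the corresponding name registered in `data/dataset_info.json`. A minimal sketch (model, dataset names and output path are illustrative):
+
+```yaml
+### supervised fine-tuning sketch (illustrative values)
+stage: sft
+do_train: true
+model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
+template: llama3
+dataset: identity,alpaca_en_demo   # comma-separated registered names
+finetuning_type: lora
+lora_target: all
+output_dir: saves/llama3-8b/lora/sft
+```
+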
+ +
Preference datasets + +- [DPO mixed (en&zh)](https://huggingface.co/datasets/hiyouga/DPO-En-Zh-20k) +- [UltraFeedback (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized) +- [COIG-P (en&zh)](https://huggingface.co/datasets/m-a-p/COIG-P) +- [RLHF-V (en)](https://huggingface.co/datasets/openbmb/RLHF-V-Dataset) +- [VLFeedback (en)](https://huggingface.co/datasets/Zhihui/VLFeedback) +- [Orca DPO Pairs (en)](https://huggingface.co/datasets/Intel/orca_dpo_pairs) +- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf) +- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar) +- [Orca DPO (de)](https://huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de) +- [KTO mixed (en)](https://huggingface.co/datasets/argilla/kto-mix-15k) + +
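+Preference datasets are consumed by the preference-based stages (DPO, KTO, ORPO, SimPO). A minimal DPO sketch (model, dataset name and output path are illustrative; the dataset must be registered in `data/dataset_info.json` in a preference format):
+
+```yaml
+### DPO training sketch (illustrative values)
+stage: dpo
+do_train: true
+model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
+template: llama3
+dataset: dpo_en_demo
+finetuning_type: lora
+lora_target: all
+pref_beta: 0.1
+pref_loss: sigmoid   # switch to orpo / simpo for the corresponding methods
+output_dir: saves/llama3-8b/lora/dpo
+```
+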
+ +Some datasets require confirmation before using them, so we recommend logging in with your Hugging Face account using these commands. + +```bash +pip install --upgrade huggingface_hub +huggingface-cli login +``` + +## Requirement + +| Mandatory | Minimum | Recommend | +| ------------ | ------- | --------- | +| python | 3.9 | 3.10 | +| torch | 2.0.0 | 2.6.0 | +| transformers | 4.45.0 | 4.50.0 | +| datasets | 2.16.0 | 3.2.0 | +| accelerate | 0.34.0 | 1.2.1 | +| peft | 0.14.0 | 0.15.1 | +| trl | 0.8.6 | 0.9.6 | + +| Optional | Minimum | Recommend | +| ------------ | ------- | --------- | +| CUDA | 11.6 | 12.2 | +| deepspeed | 0.10.0 | 0.16.4 | +| bitsandbytes | 0.39.0 | 0.43.1 | +| vllm | 0.4.3 | 0.8.2 | +| flash-attn | 2.5.6 | 2.7.2 | + +### Hardware Requirement + +\* *estimated* + +| Method | Bits | 7B | 14B | 30B | 70B | `x`B | +| ------------------------------- | ---- | ----- | ----- | ----- | ------ | ------- | +| Full (`bf16` or `fp16`) | 32 | 120GB | 240GB | 600GB | 1200GB | `18x`GB | +| Full (`pure_bf16`) | 16 | 60GB | 120GB | 300GB | 600GB | `8x`GB | +| Freeze/LoRA/GaLore/APOLLO/BAdam | 16 | 16GB | 32GB | 64GB | 160GB | `2x`GB | +| QLoRA | 8 | 10GB | 20GB | 40GB | 80GB | `x`GB | +| QLoRA | 4 | 6GB | 12GB | 24GB | 48GB | `x/2`GB | +| QLoRA | 2 | 4GB | 8GB | 16GB | 24GB | `x/4`GB | + +## Getting Started + +### Installation + +> [!IMPORTANT] +> Installation is mandatory. + +```bash +git clone --depth 1 https://github.com/hiyouga/LLaMA-Factory.git +cd LLaMA-Factory +pip install -e ".[torch,metrics]" +``` + +Extra dependencies available: torch, torch-npu, metrics, deepspeed, liger-kernel, bitsandbytes, hqq, eetq, gptq, awq, aqlm, vllm, sglang, galore, apollo, badam, adam-mini, qwen, minicpm_v, modelscope, openmind, swanlab, quality + +> [!TIP] +> Use `pip install --no-deps -e .` to resolve package conflicts. + +
Setting up a virtual environment with uv + +Create an isolated Python environment with [uv](https://github.com/astral-sh/uv): + +```bash +uv sync --extra torch --extra metrics --prerelease=allow +``` + +Run LLaMA-Factory in the isolated environment: + +```bash +uv run --prerelease=allow llamafactory-cli train examples/train_lora/llama3_lora_pretrain.yaml +``` + +
+ +
For Windows users
+
+#### Install BitsAndBytes
+
+If you want to enable quantized LoRA (QLoRA) on the Windows platform, you need to install a pre-built version of the `bitsandbytes` library that supports CUDA 11.1 to 12.2. Please select the appropriate [release version](https://github.com/jllllll/bitsandbytes-windows-webui/releases/tag/wheels) based on your CUDA version.
+
+```bash
+pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.2.post2-py3-none-win_amd64.whl
+```
+
+#### Install FlashAttention-2
+
+To enable FlashAttention-2 on the Windows platform, please use the script from [flash-attention-windows-wheel](https://huggingface.co/lldacing/flash-attention-windows-wheel) to compile and install it yourself.
+
+
+ +
For Ascend NPU users + +To install LLaMA Factory on Ascend NPU devices, please upgrade Python to version 3.10 or higher and specify extra dependencies: `pip install -e ".[torch-npu,metrics]"`. Additionally, you need to install the **[Ascend CANN Toolkit and Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**. Please follow the [installation tutorial](https://www.hiascend.com/document/detail/en/CANNCommunityEdition/600alphaX/softwareinstall/instg/atlasdeploy_03_0031.html) or use the following commands: + +```bash +# replace the url according to your CANN version and devices +# install CANN Toolkit +wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C20SPC702/Ascend-cann-toolkit_8.0.0.alpha002_linux-"$(uname -i)".run +bash Ascend-cann-toolkit_8.0.0.alpha002_linux-"$(uname -i)".run --install + +# install CANN Kernels +wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C20SPC702/Ascend-cann-kernels-910b_8.0.0.alpha002_linux-"$(uname -i)".run +bash Ascend-cann-kernels-910b_8.0.0.alpha002_linux-"$(uname -i)".run --install + +# set env variables +source /usr/local/Ascend/ascend-toolkit/set_env.sh +``` + +| Requirement | Minimum | Recommend | +| ------------ | ------- | -------------- | +| CANN | 8.0.RC1 | 8.0.0.alpha002 | +| torch | 2.1.0 | 2.4.0 | +| torch-npu | 2.1.0 | 2.4.0.post2 | +| deepspeed | 0.13.2 | 0.13.2 | +| vllm-ascend | - | 0.7.3 | + +Remember to use `ASCEND_RT_VISIBLE_DEVICES` instead of `CUDA_VISIBLE_DEVICES` to specify the device to use. + +If you cannot infer model on NPU devices, try setting `do_sample: false` in the configurations. + +Download the pre-built Docker images: [32GB](http://mirrors.cn-central-221.ovaijisuan.com/detail/130.html) | [64GB](http://mirrors.cn-central-221.ovaijisuan.com/detail/131.html) + +#### Install BitsAndBytes + +To use QLoRA based on bitsandbytes on Ascend NPU, please follow these 3 steps: + +1. Manually compile bitsandbytes: Refer to [the installation documentation](https://huggingface.co/docs/bitsandbytes/installation?backend=Ascend+NPU&platform=Ascend+NPU) for the NPU version of bitsandbytes to complete the compilation and installation. The compilation requires a cmake version of at least 3.22.1 and a g++ version of at least 12.x. + +```bash +# Install bitsandbytes from source +# Clone bitsandbytes repo, Ascend NPU backend is currently enabled on multi-backend-refactor branch +git clone -b multi-backend-refactor https://github.com/bitsandbytes-foundation/bitsandbytes.git +cd bitsandbytes/ + +# Install dependencies +pip install -r requirements-dev.txt + +# Install the dependencies for the compilation tools. Note that the commands for this step may vary depending on the operating system. The following are provided for reference +apt-get install -y build-essential cmake + +# Compile & install +cmake -DCOMPUTE_BACKEND=npu -S . +make +pip install . +``` + +2. Install transformers from the main branch. + +```bash +git clone -b main https://github.com/huggingface/transformers.git +cd transformers +pip install . +``` + +3. Set `double_quantization: false` in the configuration. You can refer to the [example](examples/train_qlora/llama3_lora_sft_bnb_npu.yaml). + +
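+Putting these pieces together, the QLoRA-specific part of an NPU training config might look like the sketch below (values are illustrative; see the linked example for a complete file):
+
+```yaml
+### QLoRA on Ascend NPU (illustrative values)
+model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
+quantization_bit: 4
+double_quantization: false   # required when using the NPU build of bitsandbytes
+stage: sft
+do_train: true
+finetuning_type: lora
+lora_target: all
+```
+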
+ +### Data Preparation + +Please refer to [data/README.md](data/README.md) for checking the details about the format of dataset files. You can use datasets on HuggingFace / ModelScope / Modelers hub, load the dataset in local disk, or specify a path to s3/gcs cloud storage. + +> [!NOTE] +> Please update `data/dataset_info.json` to use your custom dataset. + +You can also use **[Easy Dataset](https://github.com/ConardLi/easy-dataset)** to create synthetic data for fine-tuning. + +### Quickstart + +Use the following 3 commands to run LoRA **fine-tuning**, **inference** and **merging** of the Llama3-8B-Instruct model, respectively. + +```bash +llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml +llamafactory-cli chat examples/inference/llama3_lora_sft.yaml +llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml +``` + +See [examples/README.md](examples/README.md) for advanced usage (including distributed training). + +> [!TIP] +> Use `llamafactory-cli help` to show help information. +> +> Read [FAQs](https://github.com/hiyouga/LLaMA-Factory/issues/4614) first if you encounter any problems. + +### Fine-Tuning with LLaMA Board GUI (powered by [Gradio](https://github.com/gradio-app/gradio)) + +```bash +llamafactory-cli webui +``` + +### Build Docker + +For CUDA users: + +```bash +cd docker/docker-cuda/ +docker compose up -d +docker compose exec llamafactory bash +``` + +For Ascend NPU users: + +```bash +cd docker/docker-npu/ +docker compose up -d +docker compose exec llamafactory bash +``` + +For AMD ROCm users: + +```bash +cd docker/docker-rocm/ +docker compose up -d +docker compose exec llamafactory bash +``` + +
Build without Docker Compose + +For CUDA users: + +```bash +docker build -f ./docker/docker-cuda/Dockerfile \ + --build-arg INSTALL_BNB=false \ + --build-arg INSTALL_VLLM=false \ + --build-arg INSTALL_DEEPSPEED=false \ + --build-arg INSTALL_FLASHATTN=false \ + --build-arg PIP_INDEX=https://pypi.org/simple \ + -t llamafactory:latest . + +docker run -dit --gpus=all \ + -v ./hf_cache:/root/.cache/huggingface \ + -v ./ms_cache:/root/.cache/modelscope \ + -v ./om_cache:/root/.cache/openmind \ + -v ./data:/app/data \ + -v ./output:/app/output \ + -p 7860:7860 \ + -p 8000:8000 \ + --shm-size 16G \ + --name llamafactory \ + llamafactory:latest + +docker exec -it llamafactory bash +``` + +For Ascend NPU users: + +```bash +# Choose docker image upon your environment +docker build -f ./docker/docker-npu/Dockerfile \ + --build-arg INSTALL_DEEPSPEED=false \ + --build-arg PIP_INDEX=https://pypi.org/simple \ + -t llamafactory:latest . + +# Change `device` upon your resources +docker run -dit \ + -v ./hf_cache:/root/.cache/huggingface \ + -v ./ms_cache:/root/.cache/modelscope \ + -v ./om_cache:/root/.cache/openmind \ + -v ./data:/app/data \ + -v ./output:/app/output \ + -v /usr/local/dcmi:/usr/local/dcmi \ + -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \ + -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \ + -v /etc/ascend_install.info:/etc/ascend_install.info \ + -p 7860:7860 \ + -p 8000:8000 \ + --device /dev/davinci0 \ + --device /dev/davinci_manager \ + --device /dev/devmm_svm \ + --device /dev/hisi_hdc \ + --shm-size 16G \ + --name llamafactory \ + llamafactory:latest + +docker exec -it llamafactory bash +``` + +For AMD ROCm users: + +```bash +docker build -f ./docker/docker-rocm/Dockerfile \ + --build-arg INSTALL_BNB=false \ + --build-arg INSTALL_VLLM=false \ + --build-arg INSTALL_DEEPSPEED=false \ + --build-arg INSTALL_FLASHATTN=false \ + --build-arg PIP_INDEX=https://pypi.org/simple \ + -t llamafactory:latest . + +docker run -dit \ + -v ./hf_cache:/root/.cache/huggingface \ + -v ./ms_cache:/root/.cache/modelscope \ + -v ./om_cache:/root/.cache/openmind \ + -v ./data:/app/data \ + -v ./output:/app/output \ + -v ./saves:/app/saves \ + -p 7860:7860 \ + -p 8000:8000 \ + --device /dev/kfd \ + --device /dev/dri \ + --shm-size 16G \ + --name llamafactory \ + llamafactory:latest + +docker exec -it llamafactory bash +``` + +
+ +
Details about volumes
+
+- `hf_cache`: Mounts the Hugging Face cache from the host machine. It can be remapped if a cache already exists in a different directory.
+- `ms_cache`: Similar to the Hugging Face cache, but for ModelScope users.
+- `om_cache`: Similar to the Hugging Face cache, but for Modelers users.
+- `data`: Place datasets in this directory on the host machine so that they can be selected in the LLaMA Board GUI.
+- `output`: Set the export directory to this location so that the merged result can be accessed directly on the host machine.
+
+
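+For example, to point `hf_cache` at an existing cache directory on the host, adjust the corresponding volume mapping in the Compose file (a sketch; the host path is a placeholder):
+
+```yaml
+# docker-compose.yml excerpt (sketch)
+services:
+  llamafactory:
+    volumes:
+      - /path/to/existing/hf_cache:/root/.cache/huggingface
+      - ./data:/app/data
+      - ./output:/app/output
+```
+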
+ +### Deploy with OpenAI-style API and vLLM + +```bash +API_PORT=8000 llamafactory-cli api examples/inference/llama3_vllm.yaml +``` + +> [!TIP] +> Visit [this page](https://platform.openai.com/docs/api-reference/chat/create) for API document. +> +> Examples: [Image understanding](scripts/api_example/test_image.py) | [Function calling](scripts/api_example/test_toolcall.py) + +### Download from ModelScope Hub + +If you have trouble with downloading models and datasets from Hugging Face, you can use ModelScope. + +```bash +export USE_MODELSCOPE_HUB=1 # `set USE_MODELSCOPE_HUB=1` for Windows +``` + +Train the model by specifying a model ID of the ModelScope Hub as the `model_name_or_path`. You can find a full list of model IDs at [ModelScope Hub](https://modelscope.cn/models), e.g., `LLM-Research/Meta-Llama-3-8B-Instruct`. + +### Download from Modelers Hub + +You can also use Modelers Hub to download models and datasets. + +```bash +export USE_OPENMIND_HUB=1 # `set USE_OPENMIND_HUB=1` for Windows +``` + +Train the model by specifying a model ID of the Modelers Hub as the `model_name_or_path`. You can find a full list of model IDs at [Modelers Hub](https://modelers.cn/models), e.g., `TeleAI/TeleChat-7B-pt`. + +### Use W&B Logger + +To use [Weights & Biases](https://wandb.ai) for logging experimental results, you need to add the following arguments to yaml files. + +```yaml +report_to: wandb +run_name: test_run # optional +``` + +Set `WANDB_API_KEY` to [your key](https://wandb.ai/authorize) when launching training tasks to log in with your W&B account. + +### Use SwanLab Logger + +To use [SwanLab](https://github.com/SwanHubX/SwanLab) for logging experimental results, you need to add the following arguments to yaml files. + +```yaml +use_swanlab: true +swanlab_run_name: test_run # optional +``` + +When launching training tasks, you can log in to SwanLab in three ways: + +1. Add `swanlab_api_key=` to the yaml file, and set it to your [API key](https://swanlab.cn/settings). +2. Set the environment variable `SWANLAB_API_KEY` to your [API key](https://swanlab.cn/settings). +3. Use the `swanlab login` command to complete the login. + +## Projects using LLaMA Factory + +If you have a project that should be incorporated, please contact via email or create a pull request. + +
Click to show + +1. Wang et al. ESRL: Efficient Sampling-based Reinforcement Learning for Sequence Generation. 2023. [[arxiv]](https://arxiv.org/abs/2308.02223) +1. Yu et al. Open, Closed, or Small Language Models for Text Classification? 2023. [[arxiv]](https://arxiv.org/abs/2308.10092) +1. Wang et al. UbiPhysio: Support Daily Functioning, Fitness, and Rehabilitation with Action Understanding and Feedback in Natural Language. 2023. [[arxiv]](https://arxiv.org/abs/2308.10526) +1. Luceri et al. Leveraging Large Language Models to Detect Influence Campaigns in Social Media. 2023. [[arxiv]](https://arxiv.org/abs/2311.07816) +1. Zhang et al. Alleviating Hallucinations of Large Language Models through Induced Hallucinations. 2023. [[arxiv]](https://arxiv.org/abs/2312.15710) +1. Wang et al. Know Your Needs Better: Towards Structured Understanding of Marketer Demands with Analogical Reasoning Augmented LLMs. KDD 2024. [[arxiv]](https://arxiv.org/abs/2401.04319) +1. Wang et al. CANDLE: Iterative Conceptualization and Instantiation Distillation from Large Language Models for Commonsense Reasoning. ACL 2024. [[arxiv]](https://arxiv.org/abs/2401.07286) +1. Choi et al. FACT-GPT: Fact-Checking Augmentation via Claim Matching with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2402.05904) +1. Zhang et al. AutoMathText: Autonomous Data Selection with Language Models for Mathematical Texts. 2024. [[arxiv]](https://arxiv.org/abs/2402.07625) +1. Lyu et al. KnowTuning: Knowledge-aware Fine-tuning for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11176) +1. Yang et al. LaCo: Large Language Model Pruning via Layer Collaps. 2024. [[arxiv]](https://arxiv.org/abs/2402.11187) +1. Bhardwaj et al. Language Models are Homer Simpson! Safety Re-Alignment of Fine-tuned Language Models through Task Arithmetic. 2024. [[arxiv]](https://arxiv.org/abs/2402.11746) +1. Yang et al. Enhancing Empathetic Response Generation by Augmenting LLMs with Small-scale Empathetic Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11801) +1. Yi et al. Generation Meets Verification: Accelerating Large Language Model Inference with Smart Parallel Auto-Correct Decoding. ACL 2024 Findings. [[arxiv]](https://arxiv.org/abs/2402.11809) +1. Cao et al. Head-wise Shareable Attention for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11819) +1. Zhang et al. Enhancing Multilingual Capabilities of Large Language Models through Self-Distillation from Resource-Rich Languages. 2024. [[arxiv]](https://arxiv.org/abs/2402.12204) +1. Kim et al. Efficient and Effective Vocabulary Expansion Towards Multilingual Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.14714) +1. Yu et al. KIEval: A Knowledge-grounded Interactive Evaluation Framework for Large Language Models. ACL 2024. [[arxiv]](https://arxiv.org/abs/2402.15043) +1. Huang et al. Key-Point-Driven Data Synthesis with its Enhancement on Mathematical Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2403.02333) +1. Duan et al. Negating Negatives: Alignment without Human Positive Samples via Distributional Dispreference Optimization. 2024. [[arxiv]](https://arxiv.org/abs/2403.03419) +1. Xie and Schwertfeger. Empowering Robotics with Large Language Models: osmAG Map Comprehension with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2403.08228) +1. Wu et al. Large Language Models are Parallel Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2403.09073) +1. Zhang et al. 
EDT: Improving Large Language Models' Generation by Entropy-based Dynamic Temperature Sampling. 2024. [[arxiv]](https://arxiv.org/abs/2403.14541) +1. Weller et al. FollowIR: Evaluating and Teaching Information Retrieval Models to Follow Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2403.15246) +1. Hongbin Na. CBT-LLM: A Chinese Large Language Model for Cognitive Behavioral Therapy-based Mental Health Question Answering. COLING 2024. [[arxiv]](https://arxiv.org/abs/2403.16008) +1. Zan et al. CodeS: Natural Language to Code Repository via Multi-Layer Sketch. 2024. [[arxiv]](https://arxiv.org/abs/2403.16443) +1. Liu et al. Extensive Self-Contrast Enables Feedback-Free Language Model Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2404.00604) +1. Luo et al. BAdam: A Memory Efficient Full Parameter Training Method for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.02827) +1. Du et al. Chinese Tiny LLM: Pretraining a Chinese-Centric Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2404.04167) +1. Ma et al. Parameter Efficient Quasi-Orthogonal Fine-Tuning via Givens Rotation. ICML 2024. [[arxiv]](https://arxiv.org/abs/2404.04316) +1. Liu et al. Dynamic Generation of Personalities with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.07084) +1. Shang et al. How Far Have We Gone in Stripped Binary Code Understanding Using Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.09836) +1. Huang et al. LLMTune: Accelerate Database Knob Tuning with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.11581) +1. Deng et al. Text-Tuple-Table: Towards Information Integration in Text-to-Table Generation via Global Tuple Extraction. 2024. [[arxiv]](https://arxiv.org/abs/2404.14215) +1. Acikgoz et al. Hippocrates: An Open-Source Framework for Advancing Large Language Models in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2404.16621) +1. Zhang et al. Small Language Models Need Strong Verifiers to Self-Correct Reasoning. ACL 2024 Findings. [[arxiv]](https://arxiv.org/abs/2404.17140) +1. Zhou et al. FREB-TQA: A Fine-Grained Robustness Evaluation Benchmark for Table Question Answering. NAACL 2024. [[arxiv]](https://arxiv.org/abs/2404.18585) +1. Xu et al. Large Language Models for Cyber Security: A Systematic Literature Review. 2024. [[arxiv]](https://arxiv.org/abs/2405.04760) +1. Dammu et al. "They are uncultured": Unveiling Covert Harms and Social Threats in LLM Generated Conversations. 2024. [[arxiv]](https://arxiv.org/abs/2405.05378) +1. Yi et al. A safety realignment framework via subspace-oriented model fusion for large language models. 2024. [[arxiv]](https://arxiv.org/abs/2405.09055) +1. Lou et al. SPO: Multi-Dimensional Preference Sequential Alignment With Implicit Reward Modeling. 2024. [[arxiv]](https://arxiv.org/abs/2405.12739) +1. Zhang et al. Getting More from Less: Large Language Models are Good Spontaneous Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2405.13816) +1. Zhang et al. TS-Align: A Teacher-Student Collaborative Framework for Scalable Iterative Finetuning of Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2405.20215) +1. Zihong Chen. Sentence Segmentation and Sentence Punctuation Based on XunziALLM. 2024. [[paper]](https://aclanthology.org/2024.lt4hala-1.30) +1. Gao et al. The Best of Both Worlds: Toward an Honest and Helpful Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2406.00380) +1. Wang and Song. 
MARS: Benchmarking the Metaphysical Reasoning Abilities of Language Models with a Multi-task Evaluation Dataset. 2024. [[arxiv]](https://arxiv.org/abs/2406.02106) +1. Hu et al. Computational Limits of Low-Rank Adaptation (LoRA) for Transformer-Based Models. 2024. [[arxiv]](https://arxiv.org/abs/2406.03136) +1. Ge et al. Time Sensitive Knowledge Editing through Efficient Finetuning. ACL 2024. [[arxiv]](https://arxiv.org/abs/2406.04496) +1. Tan et al. Peer Review as A Multi-Turn and Long-Context Dialogue with Role-Based Interactions. 2024. [[arxiv]](https://arxiv.org/abs/2406.05688) +1. Song et al. Turbo Sparse: Achieving LLM SOTA Performance with Minimal Activated Parameters. 2024. [[arxiv]](https://arxiv.org/abs/2406.05955) +1. Gu et al. RWKV-CLIP: A Robust Vision-Language Representation Learner. 2024. [[arxiv]](https://arxiv.org/abs/2406.06973) +1. Chen et al. Advancing Tool-Augmented Large Language Models: Integrating Insights from Errors in Inference Trees. 2024. [[arxiv]](https://arxiv.org/abs/2406.07115) +1. Zhu et al. Are Large Language Models Good Statisticians?. 2024. [[arxiv]](https://arxiv.org/abs/2406.07815) +1. Li et al. Know the Unknown: An Uncertainty-Sensitive Method for LLM Instruction Tuning. 2024. [[arxiv]](https://arxiv.org/abs/2406.10099) +1. Ding et al. IntentionQA: A Benchmark for Evaluating Purchase Intention Comprehension Abilities of Language Models in E-commerce. 2024. [[arxiv]](https://arxiv.org/abs/2406.10173) +1. He et al. COMMUNITY-CROSS-INSTRUCT: Unsupervised Instruction Generation for Aligning Large Language Models to Online Communities. 2024. [[arxiv]](https://arxiv.org/abs/2406.12074) +1. Lin et al. FVEL: Interactive Formal Verification Environment with Large Language Models via Theorem Proving. 2024. [[arxiv]](https://arxiv.org/abs/2406.14408) +1. Treutlein et al. Connecting the Dots: LLMs can Infer and Verbalize Latent Structure from Disparate Training Data. 2024. [[arxiv]](https://arxiv.org/abs/2406.14546) +1. Feng et al. SS-Bench: A Benchmark for Social Story Generation and Evaluation. 2024. [[arxiv]](https://arxiv.org/abs/2406.15695) +1. Feng et al. Self-Constructed Context Decompilation with Fined-grained Alignment Enhancement. 2024. [[arxiv]](https://arxiv.org/abs/2406.17233) +1. Liu et al. Large Language Models for Cuffless Blood Pressure Measurement From Wearable Biosignals. 2024. [[arxiv]](https://arxiv.org/abs/2406.18069) +1. Iyer et al. Exploring Very Low-Resource Translation with LLMs: The University of Edinburgh's Submission to AmericasNLP 2024 Translation Task. AmericasNLP 2024. [[paper]](https://aclanthology.org/2024.americasnlp-1.25) +1. Li et al. Calibrating LLMs with Preference Optimization on Thought Trees for Generating Rationale in Science Question Scoring. 2024. [[arxiv]](https://arxiv.org/abs/2406.19949) +1. Yang et al. Financial Knowledge Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2407.00365) +1. Lin et al. DogeRM: Equipping Reward Models with Domain Knowledge through Model Merging. 2024. [[arxiv]](https://arxiv.org/abs/2407.01470) +1. Bako et al. Evaluating the Semantic Profiling Abilities of LLMs for Natural Language Utterances in Data Visualization. 2024. [[arxiv]](https://arxiv.org/abs/2407.06129) +1. Huang et al. RoLoRA: Fine-tuning Rotated Outlier-free LLMs for Effective Weight-Activation Quantization. 2024. [[arxiv]](https://arxiv.org/abs/2407.08044) +1. Jiang et al. LLM-Collaboration on Automatic Science Journalism for the General Audience. 2024. [[arxiv]](https://arxiv.org/abs/2407.09756) +1. Inouye et al. 
Applied Auto-tuning on LoRA Hyperparameters. 2024. [[paper]](https://scholarcommons.scu.edu/cseng_senior/272/) +1. Qi et al. Research on Tibetan Tourism Viewpoints information generation system based on LLM. 2024. [[arxiv]](https://arxiv.org/abs/2407.13561) +1. Xu et al. Course-Correction: Safety Alignment Using Synthetic Preferences. 2024. [[arxiv]](https://arxiv.org/abs/2407.16637) +1. Sun et al. LAMBDA: A Large Model Based Data Agent. 2024. [[arxiv]](https://arxiv.org/abs/2407.17535) +1. Zhu et al. CollectiveSFT: Scaling Large Language Models for Chinese Medical Benchmark with Collective Instructions in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2407.19705) +1. Yu et al. Correcting Negative Bias in Large Language Models through Negative Attention Score Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2408.00137) +1. Xie et al. The Power of Personalized Datasets: Advancing Chinese Composition Writing for Elementary School through Targeted Model Fine-Tuning. IALP 2024. [[paper]](https://www.asianlp.sg/conferences/ialp2024/proceedings/papers/IALP2024_P055.pdf) +1. Liu et al. Instruct-Code-Llama: Improving Capabilities of Language Model in Competition Level Code Generation by Online Judge Feedback. ICIC 2024. [[paper]](https://link.springer.com/chapter/10.1007/978-981-97-5669-8_11) +1. Wang et al. Cybernetic Sentinels: Unveiling the Impact of Safety Data Selection on Model Security in Supervised Fine-Tuning. ICIC 2024. [[paper]](https://link.springer.com/chapter/10.1007/978-981-97-5669-8_23) +1. Xia et al. Understanding the Performance and Estimating the Cost of LLM Fine-Tuning. 2024. [[arxiv]](https://arxiv.org/abs/2408.04693) +1. Zeng et al. Perceive, Reflect, and Plan: Designing LLM Agent for Goal-Directed City Navigation without Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2408.04168) +1. Xia et al. Using Pre-trained Language Model for Accurate ESG Prediction. FinNLP 2024. [[paper]](https://aclanthology.org/2024.finnlp-2.1/) +1. Liang et al. I-SHEEP: Self-Alignment of LLM from Scratch through an Iterative Self-Enhancement Paradigm. 2024. [[arxiv]](https://arxiv.org/abs/2408.08072) +1. Bai et al. Aligning Large Language Model with Direct Multi-Preference Optimization for Recommendation. CIKM 2024. [[paper]](https://dl.acm.org/doi/10.1145/3627673.3679611) +1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: A large language model for Astronomy, based on ChatGLM2-6B and Qwen-14B. +1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: A large language model specialized in Chinese legal domain, based on Baichuan-13B, is capable of retrieving and reasoning on legal knowledge. +1. **[Sunsimiao](https://github.com/X-D-Lab/Sunsimiao)**: A large language model specialized in Chinese medical domain, based on Baichuan-7B and ChatGLM-6B. +1. **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: A series of large language models for Chinese medical domain, based on LLaMA2-7B and Baichuan-13B. +1. **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**: A series of MBTI Personality large language models, capable of giving any LLM 16 different personality types based on different datasets and training methods. +1. **[Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3)**: A large language model specialized in generate metadata for stable diffusion. [[demo]](https://huggingface.co/spaces/Nekochu/Luminia-13B_SD_Prompt) +1. 
**[Chinese-LLaVA-Med](https://github.com/BUAADreamer/Chinese-LLaVA-Med)**: A multimodal large language model specialized in the Chinese medical domain, based on LLaVA-1.5-7B.
+1. **[AutoRE](https://github.com/THUDM/AutoRE)**: A document-level relation extraction system based on large language models.
+1. **[NVIDIA RTX AI Toolkit](https://github.com/NVIDIA/RTX-AI-Toolkit)**: SDKs for fine-tuning LLMs on Windows PCs with NVIDIA RTX GPUs.
+1. **[LazyLLM](https://github.com/LazyAGI/LazyLLM)**: An easy and lazy way to build multi-agent LLM applications; it supports model fine-tuning via LLaMA Factory.
+1. **[RAG-Retrieval](https://github.com/NLPJCL/RAG-Retrieval)**: A full pipeline for RAG retrieval model fine-tuning, inference, and distillation. [[blog]](https://zhuanlan.zhihu.com/p/987727357)
+1. **[360-LLaMA-Factory](https://github.com/Qihoo360/360-LLaMA-Factory)**: A modified library that supports long sequence SFT & DPO using ring attention.
+1. **[Sky-T1](https://novasky-ai.github.io/posts/sky-t1/)**: An o1-like model fine-tuned by NovaSky AI at a very low cost.
+
+
+ +## License + +This repository is licensed under the [Apache-2.0 License](LICENSE). + +Please follow the model licenses to use the corresponding model weights: [Baichuan 2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [Command R](https://cohere.com/c4ai-cc-by-nc-license) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [GLM-4](https://huggingface.co/THUDM/glm-4-9b/blob/main/LICENSE) / [GPT-2](https://github.com/openai/gpt-2/blob/master/LICENSE) / [Granite](LICENSE) / [Index](https://huggingface.co/IndexTeam/Index-1.9B/blob/main/LICENSE) / [InternLM](https://github.com/InternLM/InternLM#license) / [Llama](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [Llama 2](https://ai.meta.com/llama/license/) / [Llama 3](https://llama.meta.com/llama3/license/) / [Llama 4](https://github.com/meta-llama/llama-models/blob/main/models/llama4/LICENSE) / [MiniCPM](https://github.com/OpenBMB/MiniCPM/blob/main/MiniCPM%20Model%20License.md) / [Mistral/Mixtral/Pixtral](LICENSE) / [OLMo](LICENSE) / [Phi-1.5/Phi-2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Phi-3/Phi-4](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/LICENSE) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [Skywork](https://huggingface.co/Skywork/Skywork-13B-base/blob/main/Skywork%20Community%20License.pdf) / [StarCoder 2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [TeleChat2](https://huggingface.co/Tele-AI/telechat-7B/blob/main/TeleChat%E6%A8%A1%E5%9E%8B%E7%A4%BE%E5%8C%BA%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yi-1.5](LICENSE) / [Yuan 2](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan) + +## Citation + +If this work is helpful, please kindly cite as: + +```bibtex +@inproceedings{zheng2024llamafactory, + title={LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models}, + author={Yaowei Zheng and Richong Zhang and Junhao Zhang and Yanhan Ye and Zheyan Luo and Zhangchi Feng and Yongqiang Ma}, + booktitle={Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)}, + address={Bangkok, Thailand}, + publisher={Association for Computational Linguistics}, + year={2024}, + url={http://arxiv.org/abs/2403.13372} +} +``` + +## Acknowledgement + +This repo benefits from [PEFT](https://github.com/huggingface/peft), [TRL](https://github.com/huggingface/trl), [QLoRA](https://github.com/artidoro/qlora) and [FastChat](https://github.com/lm-sys/FastChat). Thanks for their wonderful works. 
+ +## Star History + +![Star History Chart](https://api.star-history.com/svg?repos=hiyouga/LLaMA-Factory&type=Date) diff --git a/post-training/LLaMA-Factory/README_zh.md b/post-training/LLaMA-Factory/README_zh.md new file mode 100644 index 0000000..e563c96 --- /dev/null +++ b/post-training/LLaMA-Factory/README_zh.md @@ -0,0 +1,919 @@ +![# LLaMA Factory](assets/logo.png) + +[![GitHub Repo stars](https://img.shields.io/github/stars/hiyouga/LLaMA-Factory?style=social)](https://github.com/hiyouga/LLaMA-Factory/stargazers) +[![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Factory)](https://github.com/hiyouga/LLaMA-Factory/commits/main) +[![GitHub contributors](https://img.shields.io/github/contributors/hiyouga/LLaMA-Factory?color=orange)](https://github.com/hiyouga/LLaMA-Factory/graphs/contributors) +[![GitHub workflow](https://github.com/hiyouga/LLaMA-Factory/actions/workflows/tests.yml/badge.svg)](https://github.com/hiyouga/LLaMA-Factory/actions/workflows/tests.yml) +[![PyPI](https://img.shields.io/pypi/v/llamafactory)](https://pypi.org/project/llamafactory/) +[![Citation](https://img.shields.io/badge/citation-392-green)](https://scholar.google.com/scholar?cites=12620864006390196564) +[![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Factory/pulls) + +[![Twitter](https://img.shields.io/twitter/follow/llamafactory_ai)](https://twitter.com/llamafactory_ai) +[![Discord](https://dcbadge.vercel.app/api/server/rKfvV9r9FK?compact=true&style=flat)](https://discord.gg/rKfvV9r9FK) +[![GitCode](https://gitcode.com/zhengyaowei/LLaMA-Factory/star/badge.svg)](https://gitcode.com/zhengyaowei/LLaMA-Factory) + +[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing) +[![Open in DSW](https://gallery.pai-ml.com/assets/open-in-dsw.svg)](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory) +[![Spaces](https://img.shields.io/badge/🤗-Open%20in%20Spaces-blue)](https://huggingface.co/spaces/hiyouga/LLaMA-Board) +[![Studios](https://img.shields.io/badge/ModelScope-Open%20in%20Studios-blue)](https://modelscope.cn/studios/hiyouga/LLaMA-Board) +[![SageMaker](https://img.shields.io/badge/SageMaker-Open%20in%20AWS-blue)](https://aws.amazon.com/cn/blogs/china/a-one-stop-code-free-model-fine-tuning-deployment-platform-based-on-sagemaker-and-llama-factory/) + +

+ 使用零代码命令行Web UI 轻松微调百余种大模型 +

+

+ + Github trend + +

+ + +👋 加入我们的[微信群](assets/wechat.jpg)或 [NPU 用户群](assets/wechat_npu.jpg)。 + +\[ [English](README.md) | 中文 \] + +**微调大模型可以像这样轻松…** + +https://github.com/user-attachments/assets/43b700c6-a178-41db-b1f8-8190a5d3fcfc + +选择你的打开方式: + +- **入门教程**:https://zhuanlan.zhihu.com/p/695287607 +- **框架文档**:https://llamafactory.readthedocs.io/zh-cn/latest/ +- **框架文档(昇腾 NPU)**:https://ascend.github.io/docs/sources/llamafactory/ +- **Colab(免费)**:https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing +- **本地机器**:请见[如何使用](#如何使用) +- **PAI-DSW(免费试用)**:[Llama3 案例](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory) | [Qwen2-VL 案例](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_qwen2vl) | [DeepSeek-R1-Distill 案例](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_deepseek_r1_distill_7b) +- **Amazon SageMaker**:[博客](https://aws.amazon.com/cn/blogs/china/a-one-stop-code-free-model-fine-tuning-deployment-platform-based-on-sagemaker-and-llama-factory/) +- **Easy Dataset**:[数据蒸馏微调](https://buaa-act.feishu.cn/wiki/KY9xwTGs1iqHrRkjXBwcZP9WnL9) + +> [!NOTE] +> 除上述链接以外的其他网站均为未经许可的第三方网站,请小心甄别。 + +## 目录 + +- [项目特色](#项目特色) +- [性能指标](#性能指标) +- [更新日志](#更新日志) +- [模型](#模型) +- [训练方法](#训练方法) +- [数据集](#数据集) +- [软硬件依赖](#软硬件依赖) +- [如何使用](#如何使用) + - [安装 LLaMA Factory](#安装-llama-factory) + - [数据准备](#数据准备) + - [快速开始](#快速开始) + - [LLaMA Board 可视化微调](#llama-board-可视化微调由-gradio-驱动) + - [构建 Docker](#构建-docker) + - [利用 vLLM 部署 OpenAI API](#利用-vllm-部署-openai-api) + - [从魔搭社区下载](#从魔搭社区下载) + - [从魔乐社区下载](#从魔乐社区下载) + - [使用 W&B 面板](#使用-wb-面板) + - [使用 SwanLab 面板](#使用-swanlab-面板) +- [使用了 LLaMA Factory 的项目](#使用了-llama-factory-的项目) +- [协议](#协议) +- [引用](#引用) +- [致谢](#致谢) + +## 项目特色 + +- **多种模型**:LLaMA、LLaVA、Mistral、Mixtral-MoE、Qwen、Qwen2-VL、DeepSeek、Yi、Gemma、ChatGLM、Phi 等等。 +- **集成方法**:(增量)预训练、(多模态)指令监督微调、奖励模型训练、PPO 训练、DPO 训练、KTO 训练、ORPO 训练等等。 +- **多种精度**:16 比特全参数微调、冻结微调、LoRA 微调和基于 AQLM/AWQ/GPTQ/LLM.int8/HQQ/EETQ 的 2/3/4/5/6/8 比特 QLoRA 微调。 +- **先进算法**:[GaLore](https://github.com/jiaweizzhao/GaLore)、[BAdam](https://github.com/Ledzy/BAdam)、[APOLLO](https://github.com/zhuhanqing/APOLLO)、[Adam-mini](https://github.com/zyushun/Adam-mini)、DoRA、LongLoRA、LLaMA Pro、Mixture-of-Depths、LoRA+、LoftQ 和 PiSSA。 +- **实用技巧**:[FlashAttention-2](https://github.com/Dao-AILab/flash-attention)、[Unsloth](https://github.com/unslothai/unsloth)、[Liger Kernel](https://github.com/linkedin/Liger-Kernel)、RoPE scaling、NEFTune 和 rsLoRA。 +- **广泛任务**:多轮对话、工具调用、图像理解、视觉定位、视频识别和语音理解等等。 +- **实验监控**:LlamaBoard、TensorBoard、Wandb、MLflow、[SwanLab](https://github.com/SwanHubX/SwanLab) 等等。 +- **极速推理**:基于 [vLLM](https://github.com/vllm-project/vllm) 或 [SGLang](https://github.com/sgl-project/sglang) 的 OpenAI 风格 API、浏览器界面和命令行接口。 + +### 最新模型的 Day-N 微调适配 + +| 适配时间 | 模型名称 | +| ------------ | ------------------------------------------------------------ | +| Day 0 | Qwen2.5 / Qwen2.5-VL / Gemma 3 / InternLM 3 / MiniCPM-o-2.6 | +| Day 1 | Llama 3 / GLM-4 / Mistral Small / PaliGemma2 / Llama 4 | + +## 性能指标 + +与 ChatGLM 官方的 [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning) 微调相比,LLaMA Factory 的 LoRA 微调提供了 **3.7 倍**的加速比,同时在广告文案生成任务上取得了更高的 Rouge 分数。结合 4 比特量化技术,LLaMA Factory 的 QLoRA 微调进一步降低了 GPU 显存消耗。 + +![benchmark](assets/benchmark.svg) + +
变量定义 + +- **Training Speed**: 训练阶段每秒处理的样本数量。(批处理大小=4,截断长度=1024) +- **Rouge Score**: [广告文案生成](https://aclanthology.org/D19-1321.pdf)任务验证集上的 Rouge-2 分数。(批处理大小=4,截断长度=1024) +- **GPU Memory**: 4 比特量化训练的 GPU 显存峰值。(批处理大小=1,截断长度=1024) +- 我们在 ChatGLM 的 P-Tuning 中采用 `pre_seq_len=128`,在 LLaMA Factory 的 LoRA 微调中采用 `lora_rank=32`。 + +
+ +## 更新日志 + +[25/04/16] 我们支持了 **[InternVL3](https://huggingface.co/OpenGVLab/InternVL3-8B)** 模型的微调。查看 [PR #7258](https://github.com/hiyouga/LLaMA-Factory/pull/7258) 以使用。 + +[25/04/14] 我们支持了 **[GLM-Z1](https://huggingface.co/THUDM/GLM-Z1-9B-0414)** 和 **[Kimi-VL](https://huggingface.co/moonshotai/Kimi-VL-A3B-Instruct)** 模型的微调。 + +[25/04/06] 我们支持了 **[Llama 4](https://ai.meta.com/blog/llama-4-multimodal-intelligence/)** 模型的微调。查看 [PR #7611](https://github.com/hiyouga/LLaMA-Factory/pull/7611) 以使用。 + +[25/03/31] 我们支持了 **[Qwen2.5 Omni](https://qwenlm.github.io/blog/qwen2.5-omni/)** 模型的微调。查看 [PR #7537](https://github.com/hiyouga/LLaMA-Factory/pull/7537) 以使用。 + +
展开日志 + +[25/03/15] 我们支持了 **[SGLang](https://github.com/sgl-project/sglang)** 推理后端,请使用 `infer_backend: sglang` 启用。 + +[25/03/12] 我们支持了 **[Gemma 3](https://huggingface.co/blog/gemma3)** 模型的微调。 + +[25/02/24] 我们宣布开源 **[EasyR1](https://github.com/hiyouga/EasyR1)**,一个高效可扩展的多模态强化学习框架,支持高效的 GRPO 训练。 + +[25/02/11] 我们支持了在导出模型时保存 **[Ollama](https://github.com/ollama/ollama)** 配置文件。详细用法请参照 [examples](examples/README_zh.md)。 + +[25/02/05] 我们支持了在语音理解任务上微调 **[Qwen2-Audio](Qwen/Qwen2-Audio-7B-Instruct)** 和 **[MiniCPM-o-2.6](https://huggingface.co/openbmb/MiniCPM-o-2_6)** 模型。 + +[25/01/31] 我们支持了 **[DeepSeek-R1](https://huggingface.co/deepseek-ai/DeepSeek-R1)** 和 **[Qwen2.5-VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct)** 模型的微调。 + +[25/01/15] 我们支持了 **[APOLLO](https://arxiv.org/abs/2412.05270)** 优化器。详细用法请参照 [examples](examples/README_zh.md)。 + +[25/01/14] 我们支持了 **[MiniCPM-o-2.6](https://huggingface.co/openbmb/MiniCPM-o-2_6)** 和 **[MiniCPM-V-2.6](https://huggingface.co/openbmb/MiniCPM-V-2_6)** 模型的微调。 感谢 [@BUAADreamer](https://github.com/BUAADreamer) 的 PR. + +[25/01/14] 我们支持了 **[InternLM 3](https://huggingface.co/collections/internlm/)** 模型的微调。感谢 [@hhaAndroid](https://github.com/hhaAndroid) 的 PR。 + +[25/01/10] 我们支持了 **[Phi-4](https://huggingface.co/microsoft/phi-4)** 模型的微调。 + +[24/12/21] 我们支持了使用 **[SwanLab](https://github.com/SwanHubX/SwanLab)** 跟踪与可视化实验。详细用法请参考 [此部分](#使用-swanlab-面板)。 + +[24/11/27] 我们支持了 **[Skywork-o1](https://huggingface.co/Skywork/Skywork-o1-Open-Llama-3.1-8B)** 模型的微调和 **[OpenO1](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT)** 数据集。 + +[24/10/09] 我们支持了从 **[魔乐社区](https://modelers.cn/models)** 下载预训练模型和数据集。详细用法请参照 [此教程](#从魔乐社区下载)。 + +[24/09/19] 我们支持了 **[Qwen2.5](https://qwenlm.github.io/blog/qwen2.5/)** 模型的微调。 + +[24/08/30] 我们支持了 **[Qwen2-VL](https://qwenlm.github.io/blog/qwen2-vl/)** 模型的微调。感谢 [@simonJJJ](https://github.com/simonJJJ) 的 PR。 + +[24/08/27] 我们支持了 **[Liger Kernel](https://github.com/linkedin/Liger-Kernel)**。请使用 `enable_liger_kernel: true` 来加速训练。 + +[24/08/09] 我们支持了 **[Adam-mini](https://github.com/zyushun/Adam-mini)** 优化器。详细用法请参照 [examples](examples/README_zh.md)。感谢 [@relic-yuexi](https://github.com/relic-yuexi) 的 PR。 + +[24/07/04] 我们支持了[无污染打包训练](https://github.com/MeetKai/functionary/tree/main/functionary/train/packing)。请使用 `neat_packing: true` 参数。感谢 [@chuan298](https://github.com/chuan298) 的 PR。 + +[24/06/16] 我们支持了 **[PiSSA](https://arxiv.org/abs/2404.02948)** 算法。详细用法请参照 [examples](examples/README_zh.md)。 + +[24/06/07] 我们支持了 **[Qwen2](https://qwenlm.github.io/blog/qwen2/)** 和 **[GLM-4](https://github.com/THUDM/GLM-4)** 模型的微调。 + +[24/05/26] 我们支持了 **[SimPO](https://arxiv.org/abs/2405.14734)** 偏好对齐算法。详细用法请参照 [examples](examples/README_zh.md)。 + +[24/05/20] 我们支持了 **PaliGemma** 系列模型的微调。注意 PaliGemma 是预训练模型,你需要使用 `paligemma` 模板进行微调使其获得对话能力。 + +[24/05/18] 我们支持了 **[KTO](https://arxiv.org/abs/2402.01306)** 偏好对齐算法。详细用法请参照 [examples](examples/README_zh.md)。 + +[24/05/14] 我们支持了昇腾 NPU 设备的训练和推理。详情请查阅[安装](#安装-llama-factory)部分。 + +[24/04/26] 我们支持了多模态模型 **LLaVA-1.5** 的微调。详细用法请参照 [examples](examples/README_zh.md)。 + +[24/04/22] 我们提供了在免费 T4 GPU 上微调 Llama-3 模型的 **[Colab 笔记本](https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing)**。Hugging Face 社区公开了两个利用 LLaMA Factory 微调的 Llama-3 模型,详情请见 [Llama3-8B-Chinese-Chat](https://huggingface.co/shenzhi-wang/Llama3-8B-Chinese-Chat) 和 [Llama3-Chinese](https://huggingface.co/zhichen/Llama3-Chinese)。 + +[24/04/21] 我们基于 [AstraMindAI 的仓库](https://github.com/astramind-ai/Mixture-of-depths)支持了 
**[混合深度训练](https://arxiv.org/abs/2404.02258)**。详细用法请参照 [examples](examples/README_zh.md)。 + +[24/04/16] 我们支持了 **[BAdam](https://arxiv.org/abs/2404.02827)** 优化器。详细用法请参照 [examples](examples/README_zh.md)。 + +[24/04/16] 我们支持了 **[unsloth](https://github.com/unslothai/unsloth)** 的长序列训练(24GB 可训练 Llama-2-7B-56k)。该方法相比 FlashAttention-2 提供了 **117%** 的训练速度和 **50%** 的显存节约。更多数据请见[此页面](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison)。 + +[24/03/31] 我们支持了 **[ORPO](https://arxiv.org/abs/2403.07691)**。详细用法请参照 [examples](examples/README_zh.md)。 + +[24/03/21] 我们的论文 "[LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models](https://arxiv.org/abs/2403.13372)" 可在 arXiv 上查看! + +[24/03/20] 我们支持了能在 2x24GB GPU 上微调 70B 模型的 **FSDP+QLoRA**。详细用法请参照 [examples](examples/README_zh.md)。 + +[24/03/13] 我们支持了 **[LoRA+](https://arxiv.org/abs/2402.12354)**。详细用法请参照 [examples](examples/README_zh.md)。 + +[24/03/07] 我们支持了 **[GaLore](https://arxiv.org/abs/2403.03507)** 优化器。详细用法请参照 [examples](examples/README_zh.md)。 + +[24/03/07] 我们集成了 **[vLLM](https://github.com/vllm-project/vllm)** 以实现极速并发推理。请使用 `infer_backend: vllm` 来获得 **270%** 的推理速度。 + +[24/02/28] 我们支持了 **[DoRA](https://arxiv.org/abs/2402.09353)** 微调。请使用 `use_dora: true` 参数进行 DoRA 微调。 + +[24/02/15] 我们支持了 [LLaMA Pro](https://github.com/TencentARC/LLaMA-Pro) 提出的**块扩展**方法。详细用法请参照 [examples](examples/README_zh.md)。 + +[24/02/05] Qwen1.5(Qwen2 测试版)系列模型已在 LLaMA-Factory 中实现微调支持。详情请查阅该[博客页面](https://qwenlm.github.io/zh/blog/qwen1.5/)。 + +[24/01/18] 我们针对绝大多数模型实现了 **Agent 微调**,微调时指定 `dataset: glaive_toolcall_zh` 即可使模型获得工具调用能力。 + +[23/12/23] 我们针对 LLaMA, Mistral 和 Yi 模型支持了 **[unsloth](https://github.com/unslothai/unsloth)** 的 LoRA 训练加速。请使用 `use_unsloth: true` 参数启用 unsloth 优化。该方法可提供 **170%** 的训练速度,详情请查阅[此页面](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison)。 + +[23/12/12] 我们支持了微调最新的混合专家模型 **[Mixtral 8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)**。硬件需求请查阅[此处](#硬件依赖)。 + +[23/12/01] 我们支持了从 **[魔搭社区](https://modelscope.cn/models)** 下载预训练模型和数据集。详细用法请参照 [此教程](#从魔搭社区下载)。 + +[23/10/21] 我们支持了 **[NEFTune](https://arxiv.org/abs/2310.05914)** 训练技巧。请使用 `neftune_noise_alpha: 5` 参数启用 NEFTune。 + +[23/09/27] 我们针对 LLaMA 模型支持了 [LongLoRA](https://github.com/dvlab-research/LongLoRA) 提出的 **$S^2$-Attn**。请使用 `shift_attn: true` 参数以启用该功能。 + +[23/09/23] 我们在项目中集成了 MMLU、C-Eval 和 CMMLU 评估集。详细用法请参照 [examples](examples/README_zh.md)。 + +[23/09/10] 我们支持了 **[FlashAttention-2](https://github.com/Dao-AILab/flash-attention)**。如果您使用的是 RTX4090、A100 或 H100 GPU,请使用 `flash_attn: fa2` 参数以启用 FlashAttention-2。 + +[23/08/12] 我们支持了 **RoPE 插值**来扩展 LLaMA 模型的上下文长度。请使用 `rope_scaling: linear` 参数训练模型或使用 `rope_scaling: dynamic` 参数评估模型。 + +[23/08/11] 我们支持了指令模型的 **[DPO 训练](https://arxiv.org/abs/2305.18290)**。详细用法请参照 [examples](examples/README_zh.md)。 + +[23/07/31] 我们支持了**数据流式加载**。请使用 `streaming: true` 和 `max_steps: 10000` 参数来流式加载数据集。 + +[23/07/29] 我们在 Hugging Face 发布了两个 13B 指令微调模型。详细内容请查阅我们的 Hugging Face 项目([LLaMA-2](https://huggingface.co/hiyouga/Llama-2-Chinese-13b-chat) / [Baichuan](https://huggingface.co/hiyouga/Baichuan-13B-sft))。 + +[23/07/18] 我们开发了支持训练和测试的**浏览器一体化界面**。请使用 `train_web.py` 在您的浏览器中微调模型。感谢 [@KanadeSiina](https://github.com/KanadeSiina) 和 [@codemayq](https://github.com/codemayq) 在该功能开发中付出的努力。 + +[23/07/09] 我们开源了 **[FastEdit](https://github.com/hiyouga/FastEdit)** ⚡🩹,一个简单易用的、能迅速编辑大模型事实记忆的工具包。如果您感兴趣请关注我们的 [FastEdit](https://github.com/hiyouga/FastEdit) 项目。 + +[23/06/29] 我们提供了一个**可复现的**指令模型微调示例,详细内容请查阅 [Baichuan-7B-sft](https://huggingface.co/hiyouga/Baichuan-7B-sft)。 
+ +[23/06/22] 我们对齐了[示例 API](src/api_demo.py) 与 [OpenAI API](https://platform.openai.com/docs/api-reference/chat) 的格式,您可以将微调模型接入**任意基于 ChatGPT 的应用**中。 + +[23/06/03] 我们实现了 4 比特的 LoRA 训练(也称 **[QLoRA](https://github.com/artidoro/qlora)**)。详细用法请参照 [examples](examples/README_zh.md)。 + +
+ +## 模型 + +| 模型名 | 参数量 | Template | +| ----------------------------------------------------------------- | -------------------------------- | ------------------- | +| [Baichuan 2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 | +| [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - | +| [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 | +| [Command R](https://huggingface.co/CohereForAI) | 35B/104B | cohere | +| [DeepSeek (Code/MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek | +| [DeepSeek 2.5/3](https://huggingface.co/deepseek-ai) | 236B/671B | deepseek3 | +| [DeepSeek R1 (Distill)](https://huggingface.co/deepseek-ai) | 1.5B/7B/8B/14B/32B/70B/671B | deepseek3 | +| [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon | +| [Gemma/Gemma 2/CodeGemma](https://huggingface.co/google) | 2B/7B/9B/27B | gemma | +| [Gemma 3](https://huggingface.co/google) | 1B/4B/12B/27B | gemma3/gemma (1B) | +| [GLM-4/GLM-4-0414/GLM-Z1](https://huggingface.co/THUDM) | 9B/32B | glm4 | +| [GPT-2](https://huggingface.co/openai-community) | 0.1B/0.4B/0.8B/1.5B | - | +| [Granite 3.0-3.3](https://huggingface.co/ibm-granite) | 1B/2B/3B/8B | granite3 | +| [Hunyuan](https://huggingface.co/tencent/) | 7B | hunyuan | +| [Index](https://huggingface.co/IndexTeam) | 1.9B | index | +| [InternLM 2-3](https://huggingface.co/internlm) | 7B/8B/20B | intern2 | +| [InternVL 2.5-3](https://huggingface.co/OpenGVLab)\*\* | 1B/2B/4B/8B/9B/14B/26B/38B/78B | intern_vl | +| [Kimi-VL](https://huggingface.co/moonshotai) | 16B | kimi_vl | +| [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - | +| [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 | +| [Llama 3-3.3](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 | +| [Llama 4](https://huggingface.co/meta-llama) | 109B/402B | llama4 | +| [Llama 3.2 Vision](https://huggingface.co/meta-llama) | 11B/90B | mllama | +| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava | +| [LLaVA-NeXT](https://huggingface.co/llava-hf) | 7B/8B/13B/34B/72B/110B | llava_next | +| [LLaVA-NeXT-Video](https://huggingface.co/llava-hf) | 7B/34B | llava_next_video | +| [MiniCPM](https://huggingface.co/openbmb) | 1B/2B/4B | cpm/cpm3 | +| [MiniCPM-o-2.6/MiniCPM-V-2.6](https://huggingface.co/openbmb) | 8B | minicpm_o/minicpm_v | +| [Ministral/Mistral-Nemo](https://huggingface.co/mistralai) | 8B/12B | ministral | +| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral | +| [Mistral Small](https://huggingface.co/mistralai) | 24B | mistral_small | +| [OLMo](https://huggingface.co/allenai) | 1B/7B | - | +| [PaliGemma/PaliGemma2](https://huggingface.co/google) | 3B/10B/28B | paligemma | +| [Phi-1.5/Phi-2](https://huggingface.co/microsoft) | 1.3B/2.7B | - | +| [Phi-3/Phi-3.5](https://huggingface.co/microsoft) | 4B/14B | phi | +| [Phi-3-small](https://huggingface.co/microsoft) | 7B | phi_small | +| [Phi-4](https://huggingface.co/microsoft) | 14B | phi4 | +| [Pixtral](https://huggingface.co/mistralai) | 12B | pixtral | +| [Qwen (1-2.5) (Code/Math/MoE/QwQ)](https://huggingface.co/Qwen) | 0.5B/1.5B/3B/7B/14B/32B/72B/110B | qwen | +| [Qwen2-Audio](https://huggingface.co/Qwen) | 7B | qwen2_audio | +| [Qwen2.5-Omni](https://huggingface.co/Qwen)\*\* | 7B | qwen2_omni | +| [Qwen2-VL/Qwen2.5-VL/QVQ](https://huggingface.co/Qwen) | 2B/3B/7B/32B/72B | qwen2_vl | +| [Skywork o1](https://huggingface.co/Skywork) | 8B | skywork_o1 | +| [StarCoder 2](https://huggingface.co/bigcode) 
| 3B/7B/15B | - | +| [TeleChat2](https://huggingface.co/Tele-AI) | 3B/7B/35B/115B | telechat2 | +| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse | +| [Yi/Yi-1.5 (Code)](https://huggingface.co/01-ai) | 1.5B/6B/9B/34B | yi | +| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | yi_vl | +| [Yuan 2](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan | + +> [!NOTE] +> 对于所有“基座”(Base)模型,`template` 参数可以是 `default`, `alpaca`, `vicuna` 等任意值。但“对话”(Instruct/Chat)模型请务必使用**对应的模板**。 +> +> 请务必在训练和推理时采用**完全一致**的模板。 +> +> \*:您需要从 main 分支安装 `transformers` 并使用 `DISABLE_VERSION_CHECK=1` 来跳过版本检查。 +> +> \*\*:您需要安装特定版本的 `transformers` 以使用该模型。 + +项目所支持模型的完整列表请参阅 [constants.py](src/llamafactory/extras/constants.py)。 + +您也可以在 [template.py](src/llamafactory/data/template.py) 中添加自己的对话模板。 + +## 训练方法 + +| 方法 | 全参数训练 | 部分参数训练 | LoRA | QLoRA | +| --------------------- | ------------------ | ------------------ | ------------------ | ------------------ | +| 预训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +| 指令监督微调 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +| 奖励模型训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +| PPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +| DPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +| KTO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +| ORPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +| SimPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | + +> [!TIP] +> 有关 PPO 的实现细节,请参考[此博客](https://newfacade.github.io/notes-on-reinforcement-learning/17-ppo-trl.html)。 + +## 数据集 + +
预训练数据集 + +- [Wiki Demo (en)](data/wiki_demo.txt) +- [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) +- [RedPajama V2 (en)](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-V2) +- [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220) +- [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered) +- [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile) +- [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B) +- [FineWeb (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb) +- [FineWeb-Edu (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu) +- [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack) +- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata) + +
+ +
指令微调数据集 + +- [Identity (en&zh)](data/identity.json) +- [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca) +- [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca-3) +- [Alpaca GPT4 (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM) +- [Glaive Function Calling V2 (en&zh)](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2) +- [LIMA (en)](https://huggingface.co/datasets/GAIR/lima) +- [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset) +- [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN) +- [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN) +- [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN) +- [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M) +- [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M) +- [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M) +- [UltraChat (en)](https://github.com/thunlp/UltraChat) +- [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus) +- [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k) +- [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT) +- [OpenOrca (en)](https://huggingface.co/datasets/Open-Orca/OpenOrca) +- [SlimOrca (en)](https://huggingface.co/datasets/Open-Orca/SlimOrca) +- [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct) +- [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M) +- [Wiki QA (en)](https://huggingface.co/datasets/wiki_qa) +- [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa) +- [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn) +- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar) +- [deepctrl (en&zh)](https://www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data) +- [Advertise Generating (zh)](https://huggingface.co/datasets/HasturOfficial/adgen) +- [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k) +- [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4) +- [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k) +- [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct) +- [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m) +- [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k) +- [Cosmopedia (en)](https://huggingface.co/datasets/HuggingFaceTB/cosmopedia) +- [STEM (zh)](https://huggingface.co/datasets/hfl/stem_zh_instruction) +- [Ruozhiba (zh)](https://huggingface.co/datasets/hfl/ruozhiba_gpt4_turbo) +- [Neo-sft (zh)](https://huggingface.co/datasets/m-a-p/neo_sft_phase2) +- [Magpie-Pro-300K-Filtered (en)](https://huggingface.co/datasets/Magpie-Align/Magpie-Pro-300K-Filtered) +- [Magpie-ultra-v0.1 (en)](https://huggingface.co/datasets/argilla/magpie-ultra-v0.1) +- [WebInstructSub (en)](https://huggingface.co/datasets/TIGER-Lab/WebInstructSub) +- [OpenO1-SFT (en&zh)](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT) +- [Open-Thoughts (en)](https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k) +- [Open-R1-Math (en)](https://huggingface.co/datasets/open-r1/OpenR1-Math-220k) +- [Chinese-DeepSeek-R1-Distill 
(zh)](https://huggingface.co/datasets/Congliu/Chinese-DeepSeek-R1-Distill-data-110k-SFT) +- [LLaVA mixed (en&zh)](https://huggingface.co/datasets/BUAADreamer/llava-en-zh-300k) +- [Pokemon-gpt4o-captions (en&zh)](https://huggingface.co/datasets/jugg1024/pokemon-gpt4o-captions) +- [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de) +- [Dolly 15k (de)](https://huggingface.co/datasets/mayflowergmbh/dolly-15k_de) +- [Alpaca GPT4 (de)](https://huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de) +- [OpenSchnabeltier (de)](https://huggingface.co/datasets/mayflowergmbh/openschnabeltier_de) +- [Evol Instruct (de)](https://huggingface.co/datasets/mayflowergmbh/evol-instruct_de) +- [Dolphin (de)](https://huggingface.co/datasets/mayflowergmbh/dolphin_de) +- [Booksum (de)](https://huggingface.co/datasets/mayflowergmbh/booksum_de) +- [Airoboros (de)](https://huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de) +- [Ultrachat (de)](https://huggingface.co/datasets/mayflowergmbh/ultra-chat_de) + +
+ +
偏好数据集 + +- [DPO mixed (en&zh)](https://huggingface.co/datasets/hiyouga/DPO-En-Zh-20k) +- [UltraFeedback (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized) +- [COIG-P (en&zh)](https://huggingface.co/datasets/m-a-p/COIG-P) +- [RLHF-V (en)](https://huggingface.co/datasets/openbmb/RLHF-V-Dataset) +- [VLFeedback (en)](https://huggingface.co/datasets/Zhihui/VLFeedback) +- [Orca DPO Pairs (en)](https://huggingface.co/datasets/Intel/orca_dpo_pairs) +- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf) +- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar) +- [Orca DPO (de)](https://huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de) +- [KTO mixed (en)](https://huggingface.co/datasets/argilla/kto-mix-15k) + +
+ +部分数据集的使用需要确认,我们推荐使用下述命令登录您的 Hugging Face 账户。 + +```bash +pip install --upgrade huggingface_hub +huggingface-cli login +``` + +## 软硬件依赖 + +| 必需项 | 至少 | 推荐 | +| ------------ | ------- | --------- | +| python | 3.9 | 3.10 | +| torch | 2.0.0 | 2.6.0 | +| transformers | 4.45.0 | 4.50.0 | +| datasets | 2.16.0 | 3.2.0 | +| accelerate | 0.34.0 | 1.2.1 | +| peft | 0.14.0 | 0.15.1 | +| trl | 0.8.6 | 0.9.6 | + +| 可选项 | 至少 | 推荐 | +| ------------ | ------- | --------- | +| CUDA | 11.6 | 12.2 | +| deepspeed | 0.10.0 | 0.16.4 | +| bitsandbytes | 0.39.0 | 0.43.1 | +| vllm | 0.4.3 | 0.8.2 | +| flash-attn | 2.5.6 | 2.7.2 | + +### 硬件依赖 + +\* *估算值* + +| 方法 | 精度 | 7B | 14B | 30B | 70B | `x`B | +| ------------------------------- | ---- | ----- | ----- | ----- | ------ | ------- | +| Full (`bf16` or `fp16`) | 32 | 120GB | 240GB | 600GB | 1200GB | `18x`GB | +| Full (`pure_bf16`) | 16 | 60GB | 120GB | 300GB | 600GB | `8x`GB | +| Freeze/LoRA/GaLore/APOLLO/BAdam | 16 | 16GB | 32GB | 64GB | 160GB | `2x`GB | +| QLoRA | 8 | 10GB | 20GB | 40GB | 80GB | `x`GB | +| QLoRA | 4 | 6GB | 12GB | 24GB | 48GB | `x/2`GB | +| QLoRA | 2 | 4GB | 8GB | 16GB | 24GB | `x/4`GB | + +## 如何使用 + +### 安装 LLaMA Factory + +> [!IMPORTANT] +> 此步骤为必需。 + +```bash +git clone --depth 1 https://github.com/hiyouga/LLaMA-Factory.git +cd LLaMA-Factory +pip install -e ".[torch,metrics]" +``` + +可选的额外依赖项:torch、torch-npu、metrics、deepspeed、liger-kernel、bitsandbytes、hqq、eetq、gptq、awq、aqlm、vllm、sglang、galore、apollo、badam、adam-mini、qwen、minicpm_v、modelscope、openmind、swanlab、quality + +> [!TIP] +> 遇到包冲突时,可使用 `pip install --no-deps -e .` 解决。 + +
使用 uv 构建虚拟环境 + +使用 [uv](https://github.com/astral-sh/uv) 创建隔离的 Python 环境: + +```bash +uv sync --extra torch --extra metrics --prerelease=allow +``` + +在环境中运行 LLaMA-Factory: + +```bash +uv run --prerelease=allow llamafactory-cli train examples/train_lora/llama3_lora_pretrain.yaml +``` + +
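+
+同样地,其余子命令也可以通过 uv 在该环境中运行,例如启动 LLaMA Board 可视化界面(命令仅为示例,界面本身的用法见下文):
+
+```bash
+uv run --prerelease=allow llamafactory-cli webui
+```
+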
+ + +
Windows 用户指南 + +#### 安装 BitsAndBytes + +如果要在 Windows 平台上开启量化 LoRA(QLoRA),需要安装预编译的 `bitsandbytes` 库, 支持 CUDA 11.1 到 12.2, 请根据您的 CUDA 版本情况选择适合的[发布版本](https://github.com/jllllll/bitsandbytes-windows-webui/releases/tag/wheels)。 + +```bash +pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.2.post2-py3-none-win_amd64.whl +``` + +#### 安装 Flash Attention-2 + +如果要在 Windows 平台上开启 FlashAttention-2,请使用 [flash-attention-windows-wheel](https://huggingface.co/lldacing/flash-attention-windows-wheel) 中的脚本自行编译与安装。 + +
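+
+编译得到 wheel 文件后,其安装方式与普通 wheel 相同。下面给出一个示例命令,其中的文件名仅为示意,请替换为您实际编译或下载得到的文件:
+
+```bash
+# 文件名仅为示例,请以实际生成的 wheel 为准
+pip install flash_attn-2.7.0.post2-cp310-cp310-win_amd64.whl
+```
+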
+ +
昇腾 NPU 用户指南 + +在昇腾 NPU 设备上安装 LLaMA Factory 时,请升级 Python 到 3.10 及以上,并需要指定额外依赖项,使用 `pip install -e ".[torch-npu,metrics]"` 命令安装。此外,还需要安装 **[Ascend CANN Toolkit 与 Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**,安装方法请参考[安装教程](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/80RC2alpha002/quickstart/quickstart/quickstart_18_0004.html)或使用以下命令: + +```bash +# 请替换 URL 为 CANN 版本和设备型号对应的 URL +# 安装 CANN Toolkit +wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C17SPC701/Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run +bash Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run --install + +# 安装 CANN Kernels +wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C17SPC701/Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run +bash Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run --install + +# 设置环境变量 +source /usr/local/Ascend/ascend-toolkit/set_env.sh +``` + +| 依赖项 | 至少 | 推荐 | +| ------------ | ------- | -------------- | +| CANN | 8.0.RC1 | 8.0.0.alpha002 | +| torch | 2.1.0 | 2.4.0 | +| torch-npu | 2.1.0 | 2.4.0.post2 | +| deepspeed | 0.13.2 | 0.13.2 | +| vllm-ascend | - | 0.7.3 | + +请使用 `ASCEND_RT_VISIBLE_DEVICES` 而非 `CUDA_VISIBLE_DEVICES` 来指定运算设备。 + +如果遇到无法正常推理的情况,请尝试设置 `do_sample: false`。 + +下载预构建 Docker 镜像:[32GB](http://mirrors.cn-central-221.ovaijisuan.com/detail/130.html) | [64GB](http://mirrors.cn-central-221.ovaijisuan.com/detail/131.html) + +#### 安装 BitsAndBytes + +如果要在 Ascend NPU 上进行基于 bitsandbytes 的 QLoRA 量化微调,请执行如下步骤: + +1. 手动编译 bitsandbytes:请参考[安装文档](https://huggingface.co/docs/bitsandbytes/installation?backend=Ascend+NPU&platform=Ascend+NPU)完成 NPU 版的 bitsandbytes 安装,编译要求环境 cmake 版本不低于 3.22.1,g++ 版本不低于 12.x。 + +```bash +# 从源码安装 bitsandbytes +# 克隆 bitsandbytes 仓库, Ascend NPU 目前在 multi-backend-refactor 中支持 +git clone -b multi-backend-refactor https://github.com/bitsandbytes-foundation/bitsandbytes.git +cd bitsandbytes/ + +# 安装依赖 +pip install -r requirements-dev.txt + +# 安装编译工具依赖,该步骤在不同系统上命令有所不同,供参考 +apt-get install -y build-essential cmake + +# 编译 & 安装 +cmake -DCOMPUTE_BACKEND=npu -S . +make +pip install . +``` + +2. 安装 transformers 的 main 分支版本。 + +```bash +git clone -b main https://github.com/huggingface/transformers.git +cd transformers +pip install . +``` + +3. 在训练参数中设置 `double_quantization: false`,可参考[示例](examples/train_qlora/llama3_lora_sft_bnb_npu.yaml)。 + +
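+
+作为补充,下面给出一个在单卡 NPU 上通过 `ASCEND_RT_VISIBLE_DEVICES` 指定设备并启动 LoRA 微调的示例(配置文件沿用仓库自带示例,仅供参考):
+
+```bash
+# 仅使用 0 号 NPU 进行 LoRA 微调
+ASCEND_RT_VISIBLE_DEVICES=0 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
+```
+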
+ +### 数据准备 + +关于数据集文件的格式,请参考 [data/README_zh.md](data/README_zh.md) 的内容。你可以使用 HuggingFace / ModelScope / Modelers 上的数据集或加载本地数据集。 + +> [!NOTE] +> 使用自定义数据集时,请更新 `data/dataset_info.json` 文件。 + +您也可以使用 **[Easy Dataset](https://github.com/ConardLi/easy-dataset)** 构建用于微调的合成数据。 + +### 快速开始 + +下面三行命令分别对 Llama3-8B-Instruct 模型进行 LoRA **微调**、**推理**和**合并**。 + +```bash +llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml +llamafactory-cli chat examples/inference/llama3_lora_sft.yaml +llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml +``` + +高级用法请参考 [examples/README_zh.md](examples/README_zh.md)(包括多 GPU 微调)。 + +> [!TIP] +> 使用 `llamafactory-cli help` 显示帮助信息。 +> +> 遇到报错请先看[常见问题](https://github.com/hiyouga/LLaMA-Factory/issues/4614)。 + +### LLaMA Board 可视化微调(由 [Gradio](https://github.com/gradio-app/gradio) 驱动) + +```bash +llamafactory-cli webui +``` + +### 构建 Docker + +CUDA 用户: + +```bash +cd docker/docker-cuda/ +docker compose up -d +docker compose exec llamafactory bash +``` + +昇腾 NPU 用户: + +```bash +cd docker/docker-npu/ +docker compose up -d +docker compose exec llamafactory bash +``` + +AMD ROCm 用户: + +```bash +cd docker/docker-rocm/ +docker compose up -d +docker compose exec llamafactory bash +``` + +
不使用 Docker Compose 构建 + +CUDA 用户: + +```bash +docker build -f ./docker/docker-cuda/Dockerfile \ + --build-arg INSTALL_BNB=false \ + --build-arg INSTALL_VLLM=false \ + --build-arg INSTALL_DEEPSPEED=false \ + --build-arg INSTALL_FLASHATTN=false \ + --build-arg PIP_INDEX=https://pypi.org/simple \ + -t llamafactory:latest . + +docker run -dit --gpus=all \ + -v ./hf_cache:/root/.cache/huggingface \ + -v ./ms_cache:/root/.cache/modelscope \ + -v ./om_cache:/root/.cache/openmind \ + -v ./data:/app/data \ + -v ./output:/app/output \ + -p 7860:7860 \ + -p 8000:8000 \ + --shm-size 16G \ + --name llamafactory \ + llamafactory:latest + +docker exec -it llamafactory bash +``` + +昇腾 NPU 用户: + +```bash +# 根据您的环境选择镜像 +docker build -f ./docker/docker-npu/Dockerfile \ + --build-arg INSTALL_DEEPSPEED=false \ + --build-arg PIP_INDEX=https://pypi.org/simple \ + -t llamafactory:latest . + +# 根据您的资源更改 `device` +docker run -dit \ + -v ./hf_cache:/root/.cache/huggingface \ + -v ./ms_cache:/root/.cache/modelscope \ + -v ./om_cache:/root/.cache/openmind \ + -v ./data:/app/data \ + -v ./output:/app/output \ + -v /usr/local/dcmi:/usr/local/dcmi \ + -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \ + -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \ + -v /etc/ascend_install.info:/etc/ascend_install.info \ + -p 7860:7860 \ + -p 8000:8000 \ + --device /dev/davinci0 \ + --device /dev/davinci_manager \ + --device /dev/devmm_svm \ + --device /dev/hisi_hdc \ + --shm-size 16G \ + --name llamafactory \ + llamafactory:latest + +docker exec -it llamafactory bash +``` + +AMD ROCm 用户: + +```bash +docker build -f ./docker/docker-rocm/Dockerfile \ + --build-arg INSTALL_BNB=false \ + --build-arg INSTALL_VLLM=false \ + --build-arg INSTALL_DEEPSPEED=false \ + --build-arg INSTALL_FLASHATTN=false \ + --build-arg PIP_INDEX=https://pypi.org/simple \ + -t llamafactory:latest . + +docker run -dit \ + -v ./hf_cache:/root/.cache/huggingface \ + -v ./ms_cache:/root/.cache/modelscope \ + -v ./om_cache:/root/.cache/openmind \ + -v ./data:/app/data \ + -v ./output:/app/output \ + -v ./saves:/app/saves \ + -p 7860:7860 \ + -p 8000:8000 \ + --device /dev/kfd \ + --device /dev/dri \ + --shm-size 16G \ + --name llamafactory \ + llamafactory:latest + +docker exec -it llamafactory bash +``` + +
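+
+容器启动后,可以先在容器内执行下述命令确认 LLaMA-Factory 已正确安装(仅作快速自检):
+
+```bash
+docker exec -it llamafactory llamafactory-cli help
+```
+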
+ +
数据卷详情 + +- `hf_cache`:使用宿主机的 Hugging Face 缓存文件夹,允许更改为新的目录。 +- `ms_cache`:类似 Hugging Face 缓存文件夹,为 ModelScope 用户提供。 +- `om_cache`:类似 Hugging Face 缓存文件夹,为 Modelers 用户提供。 +- `data`:宿主机中存放数据集的文件夹路径。 +- `output`:将导出目录设置为该路径后,即可在宿主机中访问导出后的模型。 + +
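+
+若希望复用宿主机上已有的缓存目录,只需在 `docker run` 命令或 docker-compose 文件中替换对应的挂载路径即可,下述路径仅为示例:
+
+```bash
+# 将宿主机已有的 Hugging Face 缓存目录挂载进容器(/data/hf_cache 仅为示例路径)
+docker run -dit --gpus=all \
+    -v /data/hf_cache:/root/.cache/huggingface \
+    -v ./data:/app/data \
+    -v ./output:/app/output \
+    -p 7860:7860 \
+    -p 8000:8000 \
+    --shm-size 16G \
+    --name llamafactory \
+    llamafactory:latest
+```
+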
+ +### 利用 vLLM 部署 OpenAI API + +```bash +API_PORT=8000 llamafactory-cli api examples/inference/llama3_vllm.yaml +``` + +> [!TIP] +> API 文档请查阅[这里](https://platform.openai.com/docs/api-reference/chat/create)。 +> +> 示例:[图像理解](scripts/api_example/test_image.py) | [工具调用](scripts/api_example/test_toolcall.py) + +### 从魔搭社区下载 + +如果您在 Hugging Face 模型和数据集的下载中遇到了问题,可以通过下述方法使用魔搭社区。 + +```bash +export USE_MODELSCOPE_HUB=1 # Windows 使用 `set USE_MODELSCOPE_HUB=1` +``` + +将 `model_name_or_path` 设置为模型 ID 来加载对应的模型。在[魔搭社区](https://modelscope.cn/models)查看所有可用的模型,例如 `LLM-Research/Meta-Llama-3-8B-Instruct`。 + +### 从魔乐社区下载 + +您也可以通过下述方法,使用魔乐社区下载数据集和模型。 + +```bash +export USE_OPENMIND_HUB=1 # Windows 使用 `set USE_OPENMIND_HUB=1` +``` + +将 `model_name_or_path` 设置为模型 ID 来加载对应的模型。在[魔乐社区](https://modelers.cn/models)查看所有可用的模型,例如 `TeleAI/TeleChat-7B-pt`。 + +### 使用 W&B 面板 + +若要使用 [Weights & Biases](https://wandb.ai) 记录实验数据,请在 yaml 文件中添加下面的参数。 + +```yaml +report_to: wandb +run_name: test_run # 可选 +``` + +在启动训练任务时,将 `WANDB_API_KEY` 设置为[密钥](https://wandb.ai/authorize)来登录 W&B 账户。 + +### 使用 SwanLab 面板 + +若要使用 [SwanLab](https://github.com/SwanHubX/SwanLab) 记录实验数据,请在 yaml 文件中添加下面的参数。 + +```yaml +use_swanlab: true +swanlab_run_name: test_run # 可选 +``` + +在启动训练任务时,登录SwanLab账户有以下三种方式: + +方式一:在 yaml 文件中添加 `swanlab_api_key=` ,并设置为你的 [API 密钥](https://swanlab.cn/settings)。 +方式二:将环境变量 `SWANLAB_API_KEY` 设置为你的 [API 密钥](https://swanlab.cn/settings)。 +方式三:启动前使用 `swanlab login` 命令完成登录。 + +## 使用了 LLaMA Factory 的项目 + +如果您有项目希望添加至下述列表,请通过邮件联系或者创建一个 PR。 + +
点击显示 + +1. Wang et al. ESRL: Efficient Sampling-based Reinforcement Learning for Sequence Generation. 2023. [[arxiv]](https://arxiv.org/abs/2308.02223) +1. Yu et al. Open, Closed, or Small Language Models for Text Classification? 2023. [[arxiv]](https://arxiv.org/abs/2308.10092) +1. Wang et al. UbiPhysio: Support Daily Functioning, Fitness, and Rehabilitation with Action Understanding and Feedback in Natural Language. 2023. [[arxiv]](https://arxiv.org/abs/2308.10526) +1. Luceri et al. Leveraging Large Language Models to Detect Influence Campaigns in Social Media. 2023. [[arxiv]](https://arxiv.org/abs/2311.07816) +1. Zhang et al. Alleviating Hallucinations of Large Language Models through Induced Hallucinations. 2023. [[arxiv]](https://arxiv.org/abs/2312.15710) +1. Wang et al. Know Your Needs Better: Towards Structured Understanding of Marketer Demands with Analogical Reasoning Augmented LLMs. KDD 2024. [[arxiv]](https://arxiv.org/abs/2401.04319) +1. Wang et al. CANDLE: Iterative Conceptualization and Instantiation Distillation from Large Language Models for Commonsense Reasoning. ACL 2024. [[arxiv]](https://arxiv.org/abs/2401.07286) +1. Choi et al. FACT-GPT: Fact-Checking Augmentation via Claim Matching with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2402.05904) +1. Zhang et al. AutoMathText: Autonomous Data Selection with Language Models for Mathematical Texts. 2024. [[arxiv]](https://arxiv.org/abs/2402.07625) +1. Lyu et al. KnowTuning: Knowledge-aware Fine-tuning for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11176) +1. Yang et al. LaCo: Large Language Model Pruning via Layer Collaps. 2024. [[arxiv]](https://arxiv.org/abs/2402.11187) +1. Bhardwaj et al. Language Models are Homer Simpson! Safety Re-Alignment of Fine-tuned Language Models through Task Arithmetic. 2024. [[arxiv]](https://arxiv.org/abs/2402.11746) +1. Yang et al. Enhancing Empathetic Response Generation by Augmenting LLMs with Small-scale Empathetic Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11801) +1. Yi et al. Generation Meets Verification: Accelerating Large Language Model Inference with Smart Parallel Auto-Correct Decoding. ACL 2024 Findings. [[arxiv]](https://arxiv.org/abs/2402.11809) +1. Cao et al. Head-wise Shareable Attention for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11819) +1. Zhang et al. Enhancing Multilingual Capabilities of Large Language Models through Self-Distillation from Resource-Rich Languages. 2024. [[arxiv]](https://arxiv.org/abs/2402.12204) +1. Kim et al. Efficient and Effective Vocabulary Expansion Towards Multilingual Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.14714) +1. Yu et al. KIEval: A Knowledge-grounded Interactive Evaluation Framework for Large Language Models. ACL 2024. [[arxiv]](https://arxiv.org/abs/2402.15043) +1. Huang et al. Key-Point-Driven Data Synthesis with its Enhancement on Mathematical Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2403.02333) +1. Duan et al. Negating Negatives: Alignment without Human Positive Samples via Distributional Dispreference Optimization. 2024. [[arxiv]](https://arxiv.org/abs/2403.03419) +1. Xie and Schwertfeger. Empowering Robotics with Large Language Models: osmAG Map Comprehension with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2403.08228) +1. Wu et al. Large Language Models are Parallel Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2403.09073) +1. Zhang et al. 
EDT: Improving Large Language Models' Generation by Entropy-based Dynamic Temperature Sampling. 2024. [[arxiv]](https://arxiv.org/abs/2403.14541) +1. Weller et al. FollowIR: Evaluating and Teaching Information Retrieval Models to Follow Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2403.15246) +1. Hongbin Na. CBT-LLM: A Chinese Large Language Model for Cognitive Behavioral Therapy-based Mental Health Question Answering. COLING 2024. [[arxiv]](https://arxiv.org/abs/2403.16008) +1. Zan et al. CodeS: Natural Language to Code Repository via Multi-Layer Sketch. 2024. [[arxiv]](https://arxiv.org/abs/2403.16443) +1. Liu et al. Extensive Self-Contrast Enables Feedback-Free Language Model Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2404.00604) +1. Luo et al. BAdam: A Memory Efficient Full Parameter Training Method for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.02827) +1. Du et al. Chinese Tiny LLM: Pretraining a Chinese-Centric Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2404.04167) +1. Ma et al. Parameter Efficient Quasi-Orthogonal Fine-Tuning via Givens Rotation. ICML 2024. [[arxiv]](https://arxiv.org/abs/2404.04316) +1. Liu et al. Dynamic Generation of Personalities with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.07084) +1. Shang et al. How Far Have We Gone in Stripped Binary Code Understanding Using Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.09836) +1. Huang et al. LLMTune: Accelerate Database Knob Tuning with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.11581) +1. Deng et al. Text-Tuple-Table: Towards Information Integration in Text-to-Table Generation via Global Tuple Extraction. 2024. [[arxiv]](https://arxiv.org/abs/2404.14215) +1. Acikgoz et al. Hippocrates: An Open-Source Framework for Advancing Large Language Models in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2404.16621) +1. Zhang et al. Small Language Models Need Strong Verifiers to Self-Correct Reasoning. ACL 2024 Findings. [[arxiv]](https://arxiv.org/abs/2404.17140) +1. Zhou et al. FREB-TQA: A Fine-Grained Robustness Evaluation Benchmark for Table Question Answering. NAACL 2024. [[arxiv]](https://arxiv.org/abs/2404.18585) +1. Xu et al. Large Language Models for Cyber Security: A Systematic Literature Review. 2024. [[arxiv]](https://arxiv.org/abs/2405.04760) +1. Dammu et al. "They are uncultured": Unveiling Covert Harms and Social Threats in LLM Generated Conversations. 2024. [[arxiv]](https://arxiv.org/abs/2405.05378) +1. Yi et al. A safety realignment framework via subspace-oriented model fusion for large language models. 2024. [[arxiv]](https://arxiv.org/abs/2405.09055) +1. Lou et al. SPO: Multi-Dimensional Preference Sequential Alignment With Implicit Reward Modeling. 2024. [[arxiv]](https://arxiv.org/abs/2405.12739) +1. Zhang et al. Getting More from Less: Large Language Models are Good Spontaneous Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2405.13816) +1. Zhang et al. TS-Align: A Teacher-Student Collaborative Framework for Scalable Iterative Finetuning of Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2405.20215) +1. Zihong Chen. Sentence Segmentation and Sentence Punctuation Based on XunziALLM. 2024. [[paper]](https://aclanthology.org/2024.lt4hala-1.30) +1. Gao et al. The Best of Both Worlds: Toward an Honest and Helpful Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2406.00380) +1. Wang and Song. 
MARS: Benchmarking the Metaphysical Reasoning Abilities of Language Models with a Multi-task Evaluation Dataset. 2024. [[arxiv]](https://arxiv.org/abs/2406.02106) +1. Hu et al. Computational Limits of Low-Rank Adaptation (LoRA) for Transformer-Based Models. 2024. [[arxiv]](https://arxiv.org/abs/2406.03136) +1. Ge et al. Time Sensitive Knowledge Editing through Efficient Finetuning. ACL 2024. [[arxiv]](https://arxiv.org/abs/2406.04496) +1. Tan et al. Peer Review as A Multi-Turn and Long-Context Dialogue with Role-Based Interactions. 2024. [[arxiv]](https://arxiv.org/abs/2406.05688) +1. Song et al. Turbo Sparse: Achieving LLM SOTA Performance with Minimal Activated Parameters. 2024. [[arxiv]](https://arxiv.org/abs/2406.05955) +1. Gu et al. RWKV-CLIP: A Robust Vision-Language Representation Learner. 2024. [[arxiv]](https://arxiv.org/abs/2406.06973) +1. Chen et al. Advancing Tool-Augmented Large Language Models: Integrating Insights from Errors in Inference Trees. 2024. [[arxiv]](https://arxiv.org/abs/2406.07115) +1. Zhu et al. Are Large Language Models Good Statisticians?. 2024. [[arxiv]](https://arxiv.org/abs/2406.07815) +1. Li et al. Know the Unknown: An Uncertainty-Sensitive Method for LLM Instruction Tuning. 2024. [[arxiv]](https://arxiv.org/abs/2406.10099) +1. Ding et al. IntentionQA: A Benchmark for Evaluating Purchase Intention Comprehension Abilities of Language Models in E-commerce. 2024. [[arxiv]](https://arxiv.org/abs/2406.10173) +1. He et al. COMMUNITY-CROSS-INSTRUCT: Unsupervised Instruction Generation for Aligning Large Language Models to Online Communities. 2024. [[arxiv]](https://arxiv.org/abs/2406.12074) +1. Lin et al. FVEL: Interactive Formal Verification Environment with Large Language Models via Theorem Proving. 2024. [[arxiv]](https://arxiv.org/abs/2406.14408) +1. Treutlein et al. Connecting the Dots: LLMs can Infer and Verbalize Latent Structure from Disparate Training Data. 2024. [[arxiv]](https://arxiv.org/abs/2406.14546) +1. Feng et al. SS-Bench: A Benchmark for Social Story Generation and Evaluation. 2024. [[arxiv]](https://arxiv.org/abs/2406.15695) +1. Feng et al. Self-Constructed Context Decompilation with Fined-grained Alignment Enhancement. 2024. [[arxiv]](https://arxiv.org/abs/2406.17233) +1. Liu et al. Large Language Models for Cuffless Blood Pressure Measurement From Wearable Biosignals. 2024. [[arxiv]](https://arxiv.org/abs/2406.18069) +1. Iyer et al. Exploring Very Low-Resource Translation with LLMs: The University of Edinburgh's Submission to AmericasNLP 2024 Translation Task. AmericasNLP 2024. [[paper]](https://aclanthology.org/2024.americasnlp-1.25) +1. Li et al. Calibrating LLMs with Preference Optimization on Thought Trees for Generating Rationale in Science Question Scoring. 2024. [[arxiv]](https://arxiv.org/abs/2406.19949) +1. Yang et al. Financial Knowledge Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2407.00365) +1. Lin et al. DogeRM: Equipping Reward Models with Domain Knowledge through Model Merging. 2024. [[arxiv]](https://arxiv.org/abs/2407.01470) +1. Bako et al. Evaluating the Semantic Profiling Abilities of LLMs for Natural Language Utterances in Data Visualization. 2024. [[arxiv]](https://arxiv.org/abs/2407.06129) +1. Huang et al. RoLoRA: Fine-tuning Rotated Outlier-free LLMs for Effective Weight-Activation Quantization. 2024. [[arxiv]](https://arxiv.org/abs/2407.08044) +1. Jiang et al. LLM-Collaboration on Automatic Science Journalism for the General Audience. 2024. [[arxiv]](https://arxiv.org/abs/2407.09756) +1. Inouye et al. 
Applied Auto-tuning on LoRA Hyperparameters. 2024. [[paper]](https://scholarcommons.scu.edu/cseng_senior/272/)
+1. Qi et al. Research on Tibetan Tourism Viewpoints information generation system based on LLM. 2024. [[arxiv]](https://arxiv.org/abs/2407.13561)
+1. Xu et al. Course-Correction: Safety Alignment Using Synthetic Preferences. 2024. [[arxiv]](https://arxiv.org/abs/2407.16637)
+1. Sun et al. LAMBDA: A Large Model Based Data Agent. 2024. [[arxiv]](https://arxiv.org/abs/2407.17535)
+1. Zhu et al. CollectiveSFT: Scaling Large Language Models for Chinese Medical Benchmark with Collective Instructions in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2407.19705)
+1. Yu et al. Correcting Negative Bias in Large Language Models through Negative Attention Score Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2408.00137)
+1. Xie et al. The Power of Personalized Datasets: Advancing Chinese Composition Writing for Elementary School through Targeted Model Fine-Tuning. IALP 2024. [[paper]](https://www.asianlp.sg/conferences/ialp2024/proceedings/papers/IALP2024_P055.pdf)
+1. Liu et al. Instruct-Code-Llama: Improving Capabilities of Language Model in Competition Level Code Generation by Online Judge Feedback. ICIC 2024. [[paper]](https://link.springer.com/chapter/10.1007/978-981-97-5669-8_11)
+1. Wang et al. Cybernetic Sentinels: Unveiling the Impact of Safety Data Selection on Model Security in Supervised Fine-Tuning. ICIC 2024. [[paper]](https://link.springer.com/chapter/10.1007/978-981-97-5669-8_23)
+1. Xia et al. Understanding the Performance and Estimating the Cost of LLM Fine-Tuning. 2024. [[arxiv]](https://arxiv.org/abs/2408.04693)
+1. Zeng et al. Perceive, Reflect, and Plan: Designing LLM Agent for Goal-Directed City Navigation without Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2408.04168)
+1. Xia et al. Using Pre-trained Language Model for Accurate ESG Prediction. FinNLP 2024. [[paper]](https://aclanthology.org/2024.finnlp-2.1/)
+1. Liang et al. I-SHEEP: Self-Alignment of LLM from Scratch through an Iterative Self-Enhancement Paradigm. 2024. [[arxiv]](https://arxiv.org/abs/2408.08072)
+1. Bai et al. Aligning Large Language Model with Direct Multi-Preference Optimization for Recommendation. CIKM 2024. [[paper]](https://dl.acm.org/doi/10.1145/3627673.3679611)
+1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: 天文大模型 StarWhisper,基于 ChatGLM2-6B 和 Qwen-14B 在天文数据上微调而得。
+1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: 中文法律领域大模型 DISC-LawLLM,基于 Baichuan-13B 微调而得,具有法律推理和知识检索能力。
+1. **[Sunsimiao](https://github.com/X-D-Lab/Sunsimiao)**: 孙思邈中文医疗大模型 Sunsimiao,基于 Baichuan-7B 和 ChatGLM-6B 在中文医疗数据上微调而得。
+1. **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: 医疗大模型项目 CareGPT,基于 LLaMA2-7B 和 Baichuan-13B 在中文医疗数据上微调而得。
+1. **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**:MBTI 性格大模型项目,根据数据集与训练方式让任意 LLM 拥有 16 个不同的性格类型。
+1. **[Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3)**:一个用于生成 Stable Diffusion 提示词的大型语言模型。[[demo]](https://huggingface.co/spaces/Nekochu/Luminia-13B_SD_Prompt)
+1. **[Chinese-LLaVA-Med](https://github.com/BUAADreamer/Chinese-LLaVA-Med)**:中文多模态医学大模型,基于 LLaVA-1.5-7B 在中文多模态医疗数据上微调而得。
+1. **[AutoRE](https://github.com/THUDM/AutoRE)**:基于大语言模型的文档级关系抽取系统。
+1. **[NVIDIA RTX AI Toolkit](https://github.com/NVIDIA/RTX-AI-Toolkit)**:在 Windows 主机上利用英伟达 RTX 设备进行大型语言模型微调的开发包。
+1. **[LazyLLM](https://github.com/LazyAGI/LazyLLM)**:一个低代码构建多 Agent 大模型应用的开发工具,支持基于 LLaMA Factory 的模型微调。
+1. 
**[RAG-Retrieval](https://github.com/NLPJCL/RAG-Retrieval)**:一个全链路 RAG 检索模型微调、推理和蒸馏代码库。[[blog]](https://zhuanlan.zhihu.com/p/987727357) +1. **[360-LLaMA-Factory](https://github.com/Qihoo360/360-LLaMA-Factory)**:一个魔改后的代码库,通过 Ring Attention 支持长序列的 SFT 和 DPO 训练。 +1. **[Sky-T1](https://novasky-ai.github.io/posts/sky-t1/)**:由 NovaSky AI 微调的低成本类 o1 长推理模型。 + +
+ +## 协议 + +本仓库的代码依照 [Apache-2.0](LICENSE) 协议开源。 + +使用模型权重时,请遵循对应的模型协议:[Baichuan 2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [Command R](https://cohere.com/c4ai-cc-by-nc-license) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [GLM-4](https://huggingface.co/THUDM/glm-4-9b/blob/main/LICENSE) / [GPT-2](https://github.com/openai/gpt-2/blob/master/LICENSE) / [Granite](LICENSE) / [Index](https://huggingface.co/IndexTeam/Index-1.9B/blob/main/LICENSE) / [InternLM](https://github.com/InternLM/InternLM#license) / [Llama](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [Llama 2](https://ai.meta.com/llama/license/) / [Llama 3](https://llama.meta.com/llama3/license/) / [Llama 4](https://github.com/meta-llama/llama-models/blob/main/models/llama4/LICENSE) / [MiniCPM](https://github.com/OpenBMB/MiniCPM/blob/main/MiniCPM%20Model%20License.md) / [Mistral/Mixtral/Pixtral](LICENSE) / [OLMo](LICENSE) / [Phi-1.5/Phi-2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Phi-3/Phi-4](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/LICENSE) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [Skywork](https://huggingface.co/Skywork/Skywork-13B-base/blob/main/Skywork%20Community%20License.pdf) / [StarCoder 2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [TeleChat2](https://huggingface.co/Tele-AI/telechat-7B/blob/main/TeleChat%E6%A8%A1%E5%9E%8B%E7%A4%BE%E5%8C%BA%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yi-1.5](LICENSE) / [Yuan 2](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan) + +## 引用 + +如果您觉得此项目有帮助,请考虑以下列格式引用 + +```bibtex +@inproceedings{zheng2024llamafactory, + title={LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models}, + author={Yaowei Zheng and Richong Zhang and Junhao Zhang and Yanhan Ye and Zheyan Luo and Zhangchi Feng and Yongqiang Ma}, + booktitle={Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)}, + address={Bangkok, Thailand}, + publisher={Association for Computational Linguistics}, + year={2024}, + url={http://arxiv.org/abs/2403.13372} +} +``` + +## 致谢 + +本项目受益于 [PEFT](https://github.com/huggingface/peft)、[TRL](https://github.com/huggingface/trl)、[QLoRA](https://github.com/artidoro/qlora) 和 [FastChat](https://github.com/lm-sys/FastChat),感谢以上诸位作者的付出。 + +## Star History + +![Star History Chart](https://api.star-history.com/svg?repos=hiyouga/LLaMA-Factory&type=Date) diff --git a/post-training/LLaMA-Factory/assets/benchmark.svg b/post-training/LLaMA-Factory/assets/benchmark.svg new file mode 100644 index 0000000..e2b1db4 --- /dev/null +++ b/post-training/LLaMA-Factory/assets/benchmark.svg @@ -0,0 +1,1216 @@ + + + + + + + + 2023-11-18T11:28:03.028228 + image/svg+xml + + + Matplotlib v3.7.1, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/post-training/LLaMA-Factory/assets/logo.png b/post-training/LLaMA-Factory/assets/logo.png new file mode 100644 index 0000000..5fb3dd5 Binary files /dev/null and b/post-training/LLaMA-Factory/assets/logo.png differ diff --git a/post-training/LLaMA-Factory/assets/wechat.jpg b/post-training/LLaMA-Factory/assets/wechat.jpg new file mode 100644 index 0000000..089674e Binary files /dev/null and b/post-training/LLaMA-Factory/assets/wechat.jpg differ diff --git a/post-training/LLaMA-Factory/assets/wechat_npu.jpg b/post-training/LLaMA-Factory/assets/wechat_npu.jpg new file mode 100644 index 0000000..a5dece5 Binary files /dev/null and b/post-training/LLaMA-Factory/assets/wechat_npu.jpg differ diff --git a/post-training/LLaMA-Factory/docker/docker-cuda/Dockerfile b/post-training/LLaMA-Factory/docker/docker-cuda/Dockerfile new file mode 100644 index 0000000..aac9635 --- /dev/null +++ b/post-training/LLaMA-Factory/docker/docker-cuda/Dockerfile @@ -0,0 +1,101 @@ +# Default use the NVIDIA official image with PyTorch 2.6.0 +# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/index.html +ARG BASE_IMAGE=nvcr.io/nvidia/pytorch:24.12-py3 +FROM ${BASE_IMAGE} + +# Define environments +ENV MAX_JOBS=4 +ENV FLASH_ATTENTION_FORCE_BUILD=TRUE +ENV VLLM_WORKER_MULTIPROC_METHOD=spawn + +# Define installation arguments +ARG INSTALL_BNB=false +ARG INSTALL_VLLM=false +ARG INSTALL_DEEPSPEED=false +ARG INSTALL_FLASHATTN=false +ARG INSTALL_LIGER_KERNEL=false +ARG INSTALL_HQQ=false +ARG INSTALL_EETQ=false +ARG PIP_INDEX=https://pypi.org/simple +ARG HTTP_PROXY= + +# Set the working directory +WORKDIR /app + +# Set http proxy +RUN if [ -n "$HTTP_PROXY" ]; then \ + echo "Configuring proxy..."; \ + export http_proxy=$HTTP_PROXY; \ + export https_proxy=$HTTP_PROXY; \ + fi + +# Install the requirements +COPY requirements.txt /app +RUN pip config set global.index-url "$PIP_INDEX" && \ + pip config set global.extra-index-url "$PIP_INDEX" && \ + python -m pip install --upgrade pip && \ + if [ -n "$HTTP_PROXY" ]; then \ + python -m pip install --proxy=$HTTP_PROXY -r requirements.txt; \ + else \ + python -m pip install -r requirements.txt; \ + fi + +# Copy the rest of the application into the image +COPY . 
/app + +# Install the LLaMA Factory +RUN EXTRA_PACKAGES="metrics"; \ + if [ "$INSTALL_BNB" == "true" ]; then \ + EXTRA_PACKAGES="${EXTRA_PACKAGES},bitsandbytes"; \ + fi; \ + if [ "$INSTALL_VLLM" == "true" ]; then \ + EXTRA_PACKAGES="${EXTRA_PACKAGES},vllm"; \ + fi; \ + if [ "$INSTALL_DEEPSPEED" == "true" ]; then \ + EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \ + fi; \ + if [ "$INSTALL_LIGER_KERNEL" == "true" ]; then \ + EXTRA_PACKAGES="${EXTRA_PACKAGES},liger-kernel"; \ + fi; \ + if [ "$INSTALL_HQQ" == "true" ]; then \ + EXTRA_PACKAGES="${EXTRA_PACKAGES},hqq"; \ + fi; \ + if [ "$INSTALL_EETQ" == "true" ]; then \ + EXTRA_PACKAGES="${EXTRA_PACKAGES},eetq"; \ + fi; \ + if [ -n "$HTTP_PROXY" ]; then \ + pip install --proxy=$HTTP_PROXY -e ".[$EXTRA_PACKAGES]"; \ + else \ + pip install -e ".[$EXTRA_PACKAGES]"; \ + fi + +# Rebuild flash attention +RUN pip uninstall -y transformer-engine flash-attn && \ + if [ "$INSTALL_FLASHATTN" == "true" ]; then \ + pip uninstall -y ninja && \ + if [ -n "$HTTP_PROXY" ]; then \ + pip install --proxy=$HTTP_PROXY ninja && \ + pip install --proxy=$HTTP_PROXY --no-cache-dir flash-attn --no-build-isolation; \ + else \ + pip install ninja && \ + pip install --no-cache-dir flash-attn --no-build-isolation; \ + fi; \ + fi + + +# Unset http proxy +RUN if [ -n "$HTTP_PROXY" ]; then \ + unset http_proxy; \ + unset https_proxy; \ + fi + +# Set up volumes +VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ] + +# Expose port 7860 for the LLaMA Board +ENV GRADIO_SERVER_PORT 7860 +EXPOSE 7860 + +# Expose port 8000 for the API service +ENV API_PORT 8000 +EXPOSE 8000 diff --git a/post-training/LLaMA-Factory/docker/docker-cuda/docker-compose.yml b/post-training/LLaMA-Factory/docker/docker-cuda/docker-compose.yml new file mode 100644 index 0000000..fa386cc --- /dev/null +++ b/post-training/LLaMA-Factory/docker/docker-cuda/docker-compose.yml @@ -0,0 +1,37 @@ +services: + llamafactory: + build: + dockerfile: ./docker/docker-cuda/Dockerfile + context: ../.. 
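+      # 以下构建参数与 docker-cuda/Dockerfile 中的 ARG 一一对应,按需改为 "true" 即可在构建镜像时安装对应的可选依赖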
+ args: + INSTALL_BNB: "false" + INSTALL_VLLM: "false" + INSTALL_DEEPSPEED: "false" + INSTALL_FLASHATTN: "false" + INSTALL_LIGER_KERNEL: "false" + INSTALL_HQQ: "false" + INSTALL_EETQ: "false" + PIP_INDEX: https://pypi.org/simple + container_name: llamafactory + volumes: + - ../../hf_cache:/root/.cache/huggingface + - ../../ms_cache:/root/.cache/modelscope + - ../../om_cache:/root/.cache/openmind + - ../../data:/app/data + - ../../output:/app/output + ports: + - "7860:7860" + - "8000:8000" + ipc: host + tty: true + shm_size: "16gb" + stdin_open: true + command: bash + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: "all" + capabilities: [gpu] + restart: unless-stopped diff --git a/post-training/LLaMA-Factory/docker/docker-npu/Dockerfile b/post-training/LLaMA-Factory/docker/docker-npu/Dockerfile new file mode 100644 index 0000000..3431176 --- /dev/null +++ b/post-training/LLaMA-Factory/docker/docker-npu/Dockerfile @@ -0,0 +1,67 @@ +# Use the Ubuntu 22.04 image with CANN 8.0.rc1 +# More versions can be found at https://hub.docker.com/r/ascendai/cann/tags +# FROM ascendai/cann:8.0.rc1-910-ubuntu22.04-py3.8 +FROM ascendai/cann:8.0.0-910b-ubuntu22.04-py3.10 +# FROM ascendai/cann:8.0.rc1-910-openeuler22.03-py3.8 +# FROM ascendai/cann:8.0.rc1-910b-openeuler22.03-py3.8 + +# Define environments +ENV DEBIAN_FRONTEND=noninteractive + +# Define installation arguments +ARG INSTALL_DEEPSPEED=false +ARG PIP_INDEX=https://pypi.org/simple +ARG TORCH_INDEX=https://download.pytorch.org/whl/cpu +ARG HTTP_PROXY= + +# Set the working directory +WORKDIR /app + +# Set http proxy +RUN if [ -n "$HTTP_PROXY" ]; then \ + echo "Configuring proxy..."; \ + export http_proxy=$HTTP_PROXY; \ + export https_proxy=$HTTP_PROXY; \ + fi + +# Install the requirements +COPY requirements.txt /app +RUN pip config set global.index-url "$PIP_INDEX" && \ + pip config set global.extra-index-url "$TORCH_INDEX" && \ + python -m pip install --upgrade pip && \ + if [ -n "$HTTP_PROXY" ]; then \ + python -m pip install --proxy=$HTTP_PROXY -r requirements.txt; \ + else \ + python -m pip install -r requirements.txt; \ + fi + +# Copy the rest of the application into the image +COPY . /app + +# Install the LLaMA Factory +RUN EXTRA_PACKAGES="torch-npu,metrics"; \ + if [ "$INSTALL_DEEPSPEED" == "true" ]; then \ + EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \ + fi; \ + if [ -n "$HTTP_PROXY" ]; then \ + pip install --proxy=$HTTP_PROXY -e ".[$EXTRA_PACKAGES]"; \ + else \ + pip install -e ".[$EXTRA_PACKAGES]"; \ + fi + +# Unset http proxy +RUN if [ -n "$HTTP_PROXY" ]; then \ + unset http_proxy; \ + unset https_proxy; \ + fi + +# Set up volumes +VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ] + +# Expose port 7860 for the LLaMA Board +ENV GRADIO_SERVER_PORT 7860 +EXPOSE 7860 + +# Expose port 8000 for the API service +ENV API_PORT 8000 +EXPOSE 8000 diff --git a/post-training/LLaMA-Factory/docker/docker-npu/docker-compose.yml b/post-training/LLaMA-Factory/docker/docker-npu/docker-compose.yml new file mode 100644 index 0000000..dd9a8e1 --- /dev/null +++ b/post-training/LLaMA-Factory/docker/docker-npu/docker-compose.yml @@ -0,0 +1,33 @@ +services: + llamafactory: + build: + dockerfile: ./docker/docker-npu/Dockerfile + context: ../.. 
+ args: + INSTALL_DEEPSPEED: "false" + PIP_INDEX: https://pypi.org/simple + container_name: llamafactory + volumes: + - ../../hf_cache:/root/.cache/huggingface + - ../../ms_cache:/root/.cache/modelscope + - ../../om_cache:/root/.cache/openmind + - ../../data:/app/data + - ../../output:/app/output + - /usr/local/dcmi:/usr/local/dcmi + - /usr/local/bin/npu-smi:/usr/local/bin/npu-smi + - /usr/local/Ascend/driver:/usr/local/Ascend/driver + - /etc/ascend_install.info:/etc/ascend_install.info + ports: + - "7860:7860" + - "8000:8000" + ipc: host + tty: true + shm_size: "16gb" + stdin_open: true + command: bash + devices: + - /dev/davinci0 + - /dev/davinci_manager + - /dev/devmm_svm + - /dev/hisi_hdc + restart: unless-stopped diff --git a/post-training/LLaMA-Factory/docker/docker-rocm/Dockerfile b/post-training/LLaMA-Factory/docker/docker-rocm/Dockerfile new file mode 100644 index 0000000..9595baf --- /dev/null +++ b/post-training/LLaMA-Factory/docker/docker-rocm/Dockerfile @@ -0,0 +1,105 @@ +FROM hardandheavy/transformers-rocm:2.2.0 + +# Define environments +ENV MAX_JOBS=4 +ENV FLASH_ATTENTION_FORCE_BUILD=TRUE +ENV VLLM_WORKER_MULTIPROC_METHOD=spawn + +# Define installation arguments +ARG INSTALL_BNB=false +ARG INSTALL_VLLM=false +ARG INSTALL_DEEPSPEED=false +ARG INSTALL_FLASHATTN=false +ARG INSTALL_LIGER_KERNEL=false +ARG INSTALL_HQQ=false +ARG INSTALL_PYTORCH=true +ARG PIP_INDEX=https://pypi.org/simple +ARG HTTP_PROXY= +ARG PYTORCH_INDEX=https://download.pytorch.org/whl/nightly/rocm6.3 + +# Use Bash instead of default /bin/sh +SHELL ["/bin/bash", "-c"] + +# Set the working directory +WORKDIR /app + +# Set http proxy +RUN if [ -n "$HTTP_PROXY" ]; then \ + echo "Configuring proxy..."; \ + export http_proxy=$HTTP_PROXY; \ + export https_proxy=$HTTP_PROXY; \ + fi + +# Install the requirements +COPY requirements.txt /app +RUN pip config set global.index-url "$PIP_INDEX" && \ + pip config set global.extra-index-url "$PIP_INDEX" && \ + python -m pip install --upgrade pip && \ + if [ -n "$HTTP_PROXY" ]; then \ + python -m pip install --proxy=$HTTP_PROXY -r requirements.txt; \ + else \ + python -m pip install -r requirements.txt; \ + fi + +# Copy the rest of the application into the image +COPY . 
/app + +# Install the LLaMA Factory +RUN EXTRA_PACKAGES="metrics"; \ + if [ "$INSTALL_BNB" == "true" ]; then \ + EXTRA_PACKAGES="${EXTRA_PACKAGES},bitsandbytes"; \ + fi; \ + if [ "$INSTALL_VLLM" == "true" ]; then \ + EXTRA_PACKAGES="${EXTRA_PACKAGES},vllm"; \ + fi; \ + if [ "$INSTALL_DEEPSPEED" == "true" ]; then \ + EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \ + fi; \ + if [ "$INSTALL_LIGER_KERNEL" == "true" ]; then \ + EXTRA_PACKAGES="${EXTRA_PACKAGES},liger-kernel"; \ + fi; \ + if [ "$INSTALL_HQQ" == "true" ]; then \ + EXTRA_PACKAGES="${EXTRA_PACKAGES},hqq"; \ + fi; \ + if [ -n "$HTTP_PROXY" ]; then \ + pip install --proxy=$HTTP_PROXY -e ".[$EXTRA_PACKAGES]"; \ + else \ + pip install -e ".[$EXTRA_PACKAGES]"; \ + fi + +# Reinstall pytorch +# This is necessary to ensure that the correct version of PyTorch is installed +RUN if [ "$INSTALL_PYTORCH" == "true" ]; then \ + pip uninstall -y torch torchvision torchaudio && \ + pip install --pre torch torchvision torchaudio --index-url "$PYTORCH_INDEX"; \ + fi + +# Rebuild flash attention +RUN pip uninstall -y transformer-engine flash-attn && \ + if [ "$INSTALL_FLASHATTN" == "true" ]; then \ + pip uninstall -y ninja && \ + if [ -n "$HTTP_PROXY" ]; then \ + pip install --proxy=$HTTP_PROXY ninja && \ + pip install --proxy=$HTTP_PROXY --no-cache-dir flash-attn --no-build-isolation; \ + else \ + pip install ninja && \ + pip install --no-cache-dir flash-attn --no-build-isolation; \ + fi; \ + fi + +# Unset http proxy +RUN if [ -n "$HTTP_PROXY" ]; then \ + unset http_proxy; \ + unset https_proxy; \ + fi + +# Set up volumes +VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ] + +# Expose port 7860 for the LLaMA Board +ENV GRADIO_SERVER_PORT 7860 +EXPOSE 7860 + +# Expose port 8000 for the API service +ENV API_PORT 8000 +EXPOSE 8000 diff --git a/post-training/LLaMA-Factory/docker/docker-rocm/docker-compose.yml b/post-training/LLaMA-Factory/docker/docker-rocm/docker-compose.yml new file mode 100644 index 0000000..caaf4e1 --- /dev/null +++ b/post-training/LLaMA-Factory/docker/docker-rocm/docker-compose.yml @@ -0,0 +1,35 @@ +services: + llamafactory: + build: + dockerfile: ./docker/docker-rocm/Dockerfile + context: ../.. + args: + INSTALL_BNB: "false" + INSTALL_VLLM: "false" + INSTALL_DEEPSPEED: "false" + INSTALL_FLASHATTN: "false" + INSTALL_LIGER_KERNEL: "false" + INSTALL_PYTORCH: "true" + INSTALL_HQQ: "false" + PIP_INDEX: https://pypi.org/simple + PYTORCH_INDEX: https://download.pytorch.org/whl/nightly/rocm6.3 + container_name: llamafactory + volumes: + - ../../hf_cache:/root/.cache/huggingface + - ../../ms_cache:/root/.cache/modelscope + - ../../om_cache:/root/.cache/openmind + - ../../data:/app/data + - ../../output:/app/output + - ../../saves:/app/saves + ports: + - "7860:7860" + - "8000:8000" + ipc: host + tty: true + shm_size: "16gb" + stdin_open: true + command: bash + devices: + - /dev/kfd:/dev/kfd + - /dev/dri:/dev/dri + restart: unless-stopped diff --git a/post-training/LLaMA-Factory/evaluation/ceval/ceval.py b/post-training/LLaMA-Factory/evaluation/ceval/ceval.py new file mode 100644 index 0000000..72693eb --- /dev/null +++ b/post-training/LLaMA-Factory/evaluation/ceval/ceval.py @@ -0,0 +1,163 @@ +# Copyright 2025 the LlamaFactory team. +# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import datasets +import pandas as pd + + +_CITATION = """\ +@article{huang2023ceval, + title={C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models}, + author={Huang, Yuzhen and Bai, Yuzhuo and Zhu, Zhihao and others}, + journal={arXiv preprint arXiv:2305.08322}, + year={2023} +} +""" + +_DESCRIPTION = """\ +C-Eval is a comprehensive Chinese evaluation suite for foundation models. +It consists of 13948 multi-choice questions spanning 52 diverse disciplines and four difficulty levels. +""" + +_HOMEPAGE = "https://cevalbenchmark.com" + +_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License" + +_URL = "ceval.zip" + +task_list = [ + "computer_network", + "operating_system", + "computer_architecture", + "college_programming", + "college_physics", + "college_chemistry", + "advanced_mathematics", + "probability_and_statistics", + "discrete_mathematics", + "electrical_engineer", + "metrology_engineer", + "high_school_mathematics", + "high_school_physics", + "high_school_chemistry", + "high_school_biology", + "middle_school_mathematics", + "middle_school_biology", + "middle_school_physics", + "middle_school_chemistry", + "veterinary_medicine", + "college_economics", + "business_administration", + "marxism", + "mao_zedong_thought", + "education_science", + "teacher_qualification", + "high_school_politics", + "high_school_geography", + "middle_school_politics", + "middle_school_geography", + "modern_chinese_history", + "ideological_and_moral_cultivation", + "logic", + "law", + "chinese_language_and_literature", + "art_studies", + "professional_tour_guide", + "legal_professional", + "high_school_chinese", + "high_school_history", + "middle_school_history", + "civil_servant", + "sports_science", + "plant_protection", + "basic_medicine", + "clinical_medicine", + "urban_and_rural_planner", + "accountant", + "fire_engineer", + "environmental_impact_assessment_engineer", + "tax_accountant", + "physician", +] + + +class CevalConfig(datasets.BuilderConfig): + def __init__(self, **kwargs): + super().__init__(version=datasets.Version("1.0.0"), **kwargs) + + +class Ceval(datasets.GeneratorBasedBuilder): + BUILDER_CONFIGS = [ + CevalConfig( + name=task_name, + ) + for task_name in task_list + ] + + def _info(self): + features = datasets.Features( + { + "id": datasets.Value("int32"), + "question": datasets.Value("string"), + "A": datasets.Value("string"), + "B": datasets.Value("string"), + "C": datasets.Value("string"), + "D": datasets.Value("string"), + "answer": datasets.Value("string"), + "explanation": datasets.Value("string"), + } + ) + return datasets.DatasetInfo( + description=_DESCRIPTION, + features=features, + homepage=_HOMEPAGE, + license=_LICENSE, + citation=_CITATION, + ) + + def _split_generators(self, dl_manager): + data_dir = dl_manager.download_and_extract(_URL) + task_name = self.config.name + return [ + datasets.SplitGenerator( + name=datasets.Split.TEST, + gen_kwargs={ + "filepath": os.path.join(data_dir, "test", f"{task_name}_test.csv"), + }, + ), + datasets.SplitGenerator( + 
name=datasets.Split.VALIDATION, + gen_kwargs={ + "filepath": os.path.join(data_dir, "val", f"{task_name}_val.csv"), + }, + ), + datasets.SplitGenerator( + name=datasets.Split.TRAIN, + gen_kwargs={ + "filepath": os.path.join(data_dir, "dev", f"{task_name}_dev.csv"), + }, + ), + ] + + def _generate_examples(self, filepath): + df = pd.read_csv(filepath, encoding="utf-8") + for i, instance in enumerate(df.to_dict(orient="records")): + if "answer" not in instance.keys(): + instance["answer"] = "" + if "explanation" not in instance.keys(): + instance["explanation"] = "" + yield i, instance diff --git a/post-training/LLaMA-Factory/evaluation/ceval/ceval.zip b/post-training/LLaMA-Factory/evaluation/ceval/ceval.zip new file mode 100644 index 0000000..d39274a Binary files /dev/null and b/post-training/LLaMA-Factory/evaluation/ceval/ceval.zip differ diff --git a/post-training/LLaMA-Factory/evaluation/ceval/mapping.json b/post-training/LLaMA-Factory/evaluation/ceval/mapping.json new file mode 100644 index 0000000..42584aa --- /dev/null +++ b/post-training/LLaMA-Factory/evaluation/ceval/mapping.json @@ -0,0 +1,210 @@ +{ + "accountant": { + "name": "注册会计师", + "category": "Other" + }, + "advanced_mathematics": { + "name": "高等数学", + "category": "STEM" + }, + "art_studies": { + "name": "艺术学", + "category": "Humanities" + }, + "basic_medicine": { + "name": "基础医学", + "category": "Other" + }, + "business_administration": { + "name": "工商管理", + "category": "Social Sciences" + }, + "chinese_language_and_literature": { + "name": "中国语言文学", + "category": "Humanities" + }, + "civil_servant": { + "name": "公务员", + "category": "Other" + }, + "clinical_medicine": { + "name": "临床医学", + "category": "Other" + }, + "college_chemistry": { + "name": "大学化学", + "category": "STEM" + }, + "college_economics": { + "name": "大学经济学", + "category": "Social Sciences" + }, + "college_physics": { + "name": "大学物理", + "category": "STEM" + }, + "college_programming": { + "name": "大学编程", + "category": "STEM" + }, + "computer_architecture": { + "name": "计算机组成", + "category": "STEM" + }, + "computer_network": { + "name": "计算机网络", + "category": "STEM" + }, + "discrete_mathematics": { + "name": "离散数学", + "category": "STEM" + }, + "education_science": { + "name": "教育学", + "category": "Social Sciences" + }, + "electrical_engineer": { + "name": "注册电气工程师", + "category": "STEM" + }, + "environmental_impact_assessment_engineer": { + "name": "环境影响评价工程师", + "category": "Other" + }, + "fire_engineer": { + "name": "注册消防工程师", + "category": "Other" + }, + "high_school_biology": { + "name": "高中生物", + "category": "STEM" + }, + "high_school_chemistry": { + "name": "高中化学", + "category": "STEM" + }, + "high_school_chinese": { + "name": "高中语文", + "category": "Humanities" + }, + "high_school_geography": { + "name": "高中地理", + "category": "Social Sciences" + }, + "high_school_history": { + "name": "高中历史", + "category": "Humanities" + }, + "high_school_mathematics": { + "name": "高中数学", + "category": "STEM" + }, + "high_school_physics": { + "name": "高中物理", + "category": "STEM" + }, + "high_school_politics": { + "name": "高中政治", + "category": "Social Sciences" + }, + "ideological_and_moral_cultivation": { + "name": "思想道德修养与法律基础", + "category": "Humanities" + }, + "law": { + "name": "法学", + "category": "Humanities" + }, + "legal_professional": { + "name": "法律职业资格", + "category": "Humanities" + }, + "logic": { + "name": "逻辑学", + "category": "Humanities" + }, + "mao_zedong_thought": { + "name": "毛泽东思想和中国特色社会主义理论体系概论", + "category": "Social Sciences" + }, + "marxism": { + 
"name": "马克思主义基本原理", + "category": "Social Sciences" + }, + "metrology_engineer": { + "name": "注册计量师", + "category": "STEM" + }, + "middle_school_biology": { + "name": "初中生物", + "category": "STEM" + }, + "middle_school_chemistry": { + "name": "初中化学", + "category": "STEM" + }, + "middle_school_geography": { + "name": "初中地理", + "category": "Social Sciences" + }, + "middle_school_history": { + "name": "初中历史", + "category": "Humanities" + }, + "middle_school_mathematics": { + "name": "初中数学", + "category": "STEM" + }, + "middle_school_physics": { + "name": "初中物理", + "category": "STEM" + }, + "middle_school_politics": { + "name": "初中政治", + "category": "Social Sciences" + }, + "modern_chinese_history": { + "name": "近代史纲要", + "category": "Humanities" + }, + "operating_system": { + "name": "操作系统", + "category": "STEM" + }, + "physician": { + "name": "医师资格", + "category": "Other" + }, + "plant_protection": { + "name": "植物保护", + "category": "Other" + }, + "probability_and_statistics": { + "name": "概率统计", + "category": "STEM" + }, + "professional_tour_guide": { + "name": "导游资格", + "category": "Humanities" + }, + "sports_science": { + "name": "体育学", + "category": "Other" + }, + "tax_accountant": { + "name": "税务师", + "category": "Other" + }, + "teacher_qualification": { + "name": "教师资格", + "category": "Social Sciences" + }, + "urban_and_rural_planner": { + "name": "注册城乡规划师", + "category": "Other" + }, + "veterinary_medicine": { + "name": "兽医学", + "category": "STEM" + } +} diff --git a/post-training/LLaMA-Factory/evaluation/cmmlu/cmmlu.py b/post-training/LLaMA-Factory/evaluation/cmmlu/cmmlu.py new file mode 100644 index 0000000..44c52f1 --- /dev/null +++ b/post-training/LLaMA-Factory/evaluation/cmmlu/cmmlu.py @@ -0,0 +1,170 @@ +# Copyright 2025 the LlamaFactory team. +# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import datasets +import pandas as pd + + +_CITATION = """\ +@article{li2023cmmlu, + title={CMMLU: Measuring massive multitask language understanding in Chinese}, + author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and others, + journal={arXiv preprint arXiv:2306.09212}, + year={2023} +} +""" + +_DESCRIPTION = """\ +CMMLU is a comprehensive Chinese assessment suite specifically designed to evaluate the advanced knowledge +and reasoning abilities of LLMs within the Chinese language and cultural context. 
+""" + +_HOMEPAGE = "https://github.com/haonan-li/CMMLU" + +_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License" + +_URL = "cmmlu.zip" + +task_list = [ + "agronomy", + "anatomy", + "ancient_chinese", + "arts", + "astronomy", + "business_ethics", + "chinese_civil_service_exam", + "chinese_driving_rule", + "chinese_food_culture", + "chinese_foreign_policy", + "chinese_history", + "chinese_literature", + "chinese_teacher_qualification", + "clinical_knowledge", + "college_actuarial_science", + "college_education", + "college_engineering_hydrology", + "college_law", + "college_mathematics", + "college_medical_statistics", + "college_medicine", + "computer_science", + "computer_security", + "conceptual_physics", + "construction_project_management", + "economics", + "education", + "electrical_engineering", + "elementary_chinese", + "elementary_commonsense", + "elementary_information_and_technology", + "elementary_mathematics", + "ethnology", + "food_science", + "genetics", + "global_facts", + "high_school_biology", + "high_school_chemistry", + "high_school_geography", + "high_school_mathematics", + "high_school_physics", + "high_school_politics", + "human_sexuality", + "international_law", + "journalism", + "jurisprudence", + "legal_and_moral_basis", + "logical", + "machine_learning", + "management", + "marketing", + "marxist_theory", + "modern_chinese", + "nutrition", + "philosophy", + "professional_accounting", + "professional_law", + "professional_medicine", + "professional_psychology", + "public_relations", + "security_study", + "sociology", + "sports_science", + "traditional_chinese_medicine", + "virology", + "world_history", + "world_religions", +] + + +class CMMLUConfig(datasets.BuilderConfig): + def __init__(self, **kwargs): + super().__init__(version=datasets.Version("1.0.1"), **kwargs) + + +class CMMLU(datasets.GeneratorBasedBuilder): + BUILDER_CONFIGS = [ + CMMLUConfig( + name=task_name, + ) + for task_name in task_list + ] + + def _info(self): + features = datasets.Features( + { + "question": datasets.Value("string"), + "A": datasets.Value("string"), + "B": datasets.Value("string"), + "C": datasets.Value("string"), + "D": datasets.Value("string"), + "answer": datasets.Value("string"), + } + ) + return datasets.DatasetInfo( + description=_DESCRIPTION, + features=features, + homepage=_HOMEPAGE, + license=_LICENSE, + citation=_CITATION, + ) + + def _split_generators(self, dl_manager): + data_dir = dl_manager.download_and_extract(_URL) + task_name = self.config.name + return [ + datasets.SplitGenerator( + name=datasets.Split.TEST, + gen_kwargs={ + "filepath": os.path.join(data_dir, f"test/{task_name}.csv"), + }, + ), + datasets.SplitGenerator( + name=datasets.Split.TRAIN, + gen_kwargs={ + "filepath": os.path.join(data_dir, f"dev/{task_name}.csv"), + }, + ), + ] + + def _generate_examples(self, filepath): + df = pd.read_csv(filepath, header=0, index_col=0, encoding="utf-8") + for i, instance in enumerate(df.to_dict(orient="records")): + question = instance.pop("Question", "") + answer = instance.pop("Answer", "") + instance["question"] = question + instance["answer"] = answer + yield i, instance diff --git a/post-training/LLaMA-Factory/evaluation/cmmlu/cmmlu.zip b/post-training/LLaMA-Factory/evaluation/cmmlu/cmmlu.zip new file mode 100644 index 0000000..c6bede1 Binary files /dev/null and b/post-training/LLaMA-Factory/evaluation/cmmlu/cmmlu.zip differ diff --git a/post-training/LLaMA-Factory/evaluation/cmmlu/mapping.json 
b/post-training/LLaMA-Factory/evaluation/cmmlu/mapping.json new file mode 100644 index 0000000..312d038 --- /dev/null +++ b/post-training/LLaMA-Factory/evaluation/cmmlu/mapping.json @@ -0,0 +1,270 @@ +{ + "agronomy": { + "name": "农学", + "category": "Other" + }, + "anatomy": { + "name": "解剖学", + "category": "STEM" + }, + "ancient_chinese": { + "name": "古汉语", + "category": "Social Sciences" + }, + "arts": { + "name": "艺术学", + "category": "Humanities" + }, + "astronomy": { + "name": "天文学", + "category": "STEM" + }, + "business_ethics": { + "name": "商业伦理", + "category": "Social Sciences" + }, + "chinese_civil_service_exam": { + "name": "中国公务员考试", + "category": "Social Sciences" + }, + "chinese_driving_rule": { + "name": "中国驾驶规则", + "category": "Other" + }, + "chinese_food_culture": { + "name": "中国饮食文化", + "category": "Social Sciences" + }, + "chinese_foreign_policy": { + "name": "中国外交政策", + "category": "Social Sciences" + }, + "chinese_history": { + "name": "中国历史", + "category": "Humanities" + }, + "chinese_literature": { + "name": "中国文学", + "category": "Humanities" + }, + "chinese_teacher_qualification": { + "name": "中国教师资格", + "category": "Social Sciences" + }, + "college_actuarial_science": { + "name": "大学精算学", + "category": "STEM" + }, + "college_education": { + "name": "大学教育学", + "category": "Social Sciences" + }, + "college_engineering_hydrology": { + "name": "大学工程水文学", + "category": "STEM" + }, + "college_law": { + "name": "大学法律", + "category": "Humanities" + }, + "college_mathematics": { + "name": "大学数学", + "category": "STEM" + }, + "college_medical_statistics": { + "name": "大学医学统计", + "category": "STEM" + }, + "clinical_knowledge": { + "name": "临床知识", + "category": "Other" + }, + "college_medicine": { + "name": "大学医学", + "category": "Other" + }, + "computer_science": { + "name": "计算机科学", + "category": "STEM" + }, + "computer_security": { + "name": "计算机安全", + "category": "Other" + }, + "conceptual_physics": { + "name": "概念物理学", + "category": "STEM" + }, + "construction_project_management": { + "name": "建设工程管理", + "category": "Other" + }, + "economics": { + "name": "经济学", + "category": "Social Sciences" + }, + "education": { + "name": "教育学", + "category": "Social Sciences" + }, + "elementary_chinese": { + "name": "小学语文", + "category": "Social Sciences" + }, + "elementary_commonsense": { + "name": "小学常识", + "category": "Other" + }, + "elementary_information_and_technology": { + "name": "小学信息技术", + "category": "Other" + }, + "electrical_engineering": { + "name": "电气工程", + "category": "STEM" + }, + "elementary_mathematics": { + "name": "初等数学", + "category": "STEM" + }, + "ethnology": { + "name": "民族学", + "category": "Social Sciences" + }, + "food_science": { + "name": "食品科学", + "category": "Other" + }, + "genetics": { + "name": "遗传学", + "category": "STEM" + }, + "global_facts": { + "name": "全球事实", + "category": "Humanities" + }, + "high_school_biology": { + "name": "高中生物", + "category": "STEM" + }, + "high_school_chemistry": { + "name": "高中化学", + "category": "STEM" + }, + "high_school_geography": { + "name": "高中地理", + "category": "Social Sciences" + }, + "high_school_mathematics": { + "name": "高中数学", + "category": "STEM" + }, + "high_school_physics": { + "name": "高中物理学", + "category": "STEM" + }, + "high_school_politics": { + "name": "高中政治", + "category": "Social Sciences" + }, + "human_sexuality": { + "name": "人类性行为", + "category": "Other" + }, + "international_law": { + "name": "国际法学", + "category": "Humanities" + }, + "journalism": { + "name": "新闻学", + "category": "Social Sciences" + }, 
+ "jurisprudence": { + "name": "法理学", + "category": "Humanities" + }, + "legal_and_moral_basis": { + "name": "法律与道德基础", + "category": "Other" + }, + "logical": { + "name": "逻辑学", + "category": "Humanities" + }, + "machine_learning": { + "name": "机器学习", + "category": "STEM" + }, + "management": { + "name": "管理学", + "category": "Social Sciences" + }, + "marketing": { + "name": "市场营销", + "category": "Social Sciences" + }, + "marxist_theory": { + "name": "马克思主义理论", + "category": "Humanities" + }, + "modern_chinese": { + "name": "现代汉语", + "category": "Social Sciences" + }, + "nutrition": { + "name": "营养学", + "category": "Other" + }, + "philosophy": { + "name": "哲学", + "category": "Humanities" + }, + "professional_accounting": { + "name": "专业会计", + "category": "Social Sciences" + }, + "professional_law": { + "name": "专业法学", + "category": "Humanities" + }, + "professional_medicine": { + "name": "专业医学", + "category": "Other" + }, + "professional_psychology": { + "name": "专业心理学", + "category": "Social Sciences" + }, + "public_relations": { + "name": "公共关系", + "category": "Social Sciences" + }, + "security_study": { + "name": "安全研究", + "category": "Social Sciences" + }, + "sociology": { + "name": "社会学", + "category": "Social Sciences" + }, + "sports_science": { + "name": "体育学", + "category": "Other" + }, + "traditional_chinese_medicine": { + "name": "中医中药", + "category": "Other" + }, + "virology": { + "name": "病毒学", + "category": "STEM" + }, + "world_history": { + "name": "世界历史", + "category": "Humanities" + }, + "world_religions": { + "name": "世界宗教", + "category": "Humanities" + } +} diff --git a/post-training/LLaMA-Factory/evaluation/mmlu/mapping.json b/post-training/LLaMA-Factory/evaluation/mmlu/mapping.json new file mode 100644 index 0000000..27bd7c2 --- /dev/null +++ b/post-training/LLaMA-Factory/evaluation/mmlu/mapping.json @@ -0,0 +1,230 @@ +{ + "abstract_algebra": { + "name": "abstract algebra", + "category": "STEM" + }, + "anatomy": { + "name": "anatomy", + "category": "Other" + }, + "astronomy": { + "name": "astronomy", + "category": "STEM" + }, + "business_ethics": { + "name": "business ethics", + "category": "Other" + }, + "clinical_knowledge": { + "name": "clinical knowledge", + "category": "Other" + }, + "college_biology": { + "name": "college biology", + "category": "STEM" + }, + "college_chemistry": { + "name": "college chemistry", + "category": "STEM" + }, + "college_computer_science": { + "name": "college computer science", + "category": "STEM" + }, + "college_mathematics": { + "name": "college mathematics", + "category": "STEM" + }, + "college_medicine": { + "name": "college medicine", + "category": "Other" + }, + "college_physics": { + "name": "college physics", + "category": "STEM" + }, + "computer_security": { + "name": "computer security", + "category": "STEM" + }, + "conceptual_physics": { + "name": "conceptual physics", + "category": "STEM" + }, + "econometrics": { + "name": "econometrics", + "category": "Social Sciences" + }, + "electrical_engineering": { + "name": "electrical engineering", + "category": "STEM" + }, + "elementary_mathematics": { + "name": "elementary mathematics", + "category": "STEM" + }, + "formal_logic": { + "name": "formal logic", + "category": "Humanities" + }, + "global_facts": { + "name": "global facts", + "category": "Other" + }, + "high_school_biology": { + "name": "high school biology", + "category": "STEM" + }, + "high_school_chemistry": { + "name": "high school chemistry", + "category": "STEM" + }, + "high_school_computer_science": { + "name": 
"high school computer science", + "category": "STEM" + }, + "high_school_european_history": { + "name": "high school european history", + "category": "Humanities" + }, + "high_school_geography": { + "name": "high school geography", + "category": "Social Sciences" + }, + "high_school_government_and_politics": { + "name": "high school government and politics", + "category": "Social Sciences" + }, + "high_school_macroeconomics": { + "name": "high school macroeconomics", + "category": "Social Sciences" + }, + "high_school_mathematics": { + "name": "high school mathematics", + "category": "STEM" + }, + "high_school_microeconomics": { + "name": "high school microeconomics", + "category": "Social Sciences" + }, + "high_school_physics": { + "name": "high school physics", + "category": "STEM" + }, + "high_school_psychology": { + "name": "high school psychology", + "category": "Social Sciences" + }, + "high_school_statistics": { + "name": "high school statistics", + "category": "STEM" + }, + "high_school_us_history": { + "name": "high school us history", + "category": "Humanities" + }, + "high_school_world_history": { + "name": "high school world history", + "category": "Humanities" + }, + "human_aging": { + "name": "human aging", + "category": "Other" + }, + "human_sexuality": { + "name": "human sexuality", + "category": "Social Sciences" + }, + "international_law": { + "name": "international law", + "category": "Humanities" + }, + "jurisprudence": { + "name": "jurisprudence", + "category": "Humanities" + }, + "logical_fallacies": { + "name": "logical fallacies", + "category": "Humanities" + }, + "machine_learning": { + "name": "machine learning", + "category": "STEM" + }, + "management": { + "name": "management", + "category": "Other" + }, + "marketing": { + "name": "marketing", + "category": "Other" + }, + "medical_genetics": { + "name": "medical genetics", + "category": "Other" + }, + "miscellaneous": { + "name": "miscellaneous", + "category": "Other" + }, + "moral_disputes": { + "name": "moral disputes", + "category": "Humanities" + }, + "moral_scenarios": { + "name": "moral scenarios", + "category": "Humanities" + }, + "nutrition": { + "name": "nutrition", + "category": "Other" + }, + "philosophy": { + "name": "philosophy", + "category": "Humanities" + }, + "prehistory": { + "name": "prehistory", + "category": "Humanities" + }, + "professional_accounting": { + "name": "professional accounting", + "category": "Other" + }, + "professional_law": { + "name": "professional law", + "category": "Humanities" + }, + "professional_medicine": { + "name": "professional medicine", + "category": "Other" + }, + "professional_psychology": { + "name": "professional psychology", + "category": "Social Sciences" + }, + "public_relations": { + "name": "public relations", + "category": "Social Sciences" + }, + "security_studies": { + "name": "security studies", + "category": "Social Sciences" + }, + "sociology": { + "name": "sociology", + "category": "Social Sciences" + }, + "us_foreign_policy": { + "name": "us foreign policy", + "category": "Social Sciences" + }, + "virology": { + "name": "virology", + "category": "Other" + }, + "world_religions": { + "name": "world religions", + "category": "Humanities" + } +} diff --git a/post-training/LLaMA-Factory/evaluation/mmlu/mmlu.py b/post-training/LLaMA-Factory/evaluation/mmlu/mmlu.py new file mode 100644 index 0000000..6312742 --- /dev/null +++ b/post-training/LLaMA-Factory/evaluation/mmlu/mmlu.py @@ -0,0 +1,163 @@ +# Copyright 2025 the LlamaFactory team. 
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import datasets +import pandas as pd + + +_CITATION = """\ +@article{hendryckstest2021, + title={Measuring Massive Multitask Language Understanding}, + author={Dan Hendrycks and Collin Burns and others}, + journal={Proceedings of the International Conference on Learning Representations (ICLR)}, + year={2021} +} +""" + +_DESCRIPTION = """\ +Measuring Massive Multitask Language Understanding by Dan Hendrycks, Collin Burns, Steven Basart, +Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt (ICLR 2021). +""" + +_HOMEPAGE = "https://github.com/hendrycks/test" + +_LICENSE = "MIT" + +_URL = "mmlu.zip" + +task_list = [ + "high_school_european_history", + "business_ethics", + "clinical_knowledge", + "medical_genetics", + "high_school_us_history", + "high_school_physics", + "high_school_world_history", + "virology", + "high_school_microeconomics", + "econometrics", + "college_computer_science", + "high_school_biology", + "abstract_algebra", + "professional_accounting", + "philosophy", + "professional_medicine", + "nutrition", + "global_facts", + "machine_learning", + "security_studies", + "public_relations", + "professional_psychology", + "prehistory", + "anatomy", + "human_sexuality", + "college_medicine", + "high_school_government_and_politics", + "college_chemistry", + "logical_fallacies", + "high_school_geography", + "elementary_mathematics", + "human_aging", + "college_mathematics", + "high_school_psychology", + "formal_logic", + "high_school_statistics", + "international_law", + "high_school_mathematics", + "high_school_computer_science", + "conceptual_physics", + "miscellaneous", + "high_school_chemistry", + "marketing", + "professional_law", + "management", + "college_physics", + "jurisprudence", + "world_religions", + "sociology", + "us_foreign_policy", + "high_school_macroeconomics", + "computer_security", + "moral_scenarios", + "moral_disputes", + "electrical_engineering", + "astronomy", + "college_biology", +] + + +class MMLUConfig(datasets.BuilderConfig): + def __init__(self, **kwargs): + super().__init__(version=datasets.Version("1.0.0"), **kwargs) + + +class MMLU(datasets.GeneratorBasedBuilder): + BUILDER_CONFIGS = [ + MMLUConfig( + name=task_name, + ) + for task_name in task_list + ] + + def _info(self): + features = datasets.Features( + { + "question": datasets.Value("string"), + "A": datasets.Value("string"), + "B": datasets.Value("string"), + "C": datasets.Value("string"), + "D": datasets.Value("string"), + "answer": datasets.Value("string"), + } + ) + return datasets.DatasetInfo( + description=_DESCRIPTION, + features=features, + homepage=_HOMEPAGE, + license=_LICENSE, + citation=_CITATION, + ) + + def _split_generators(self, dl_manager): + data_dir = dl_manager.download_and_extract(_URL) + task_name = self.config.name + return [ + datasets.SplitGenerator( + name=datasets.Split.TEST, + gen_kwargs={ + "filepath": 
os.path.join(data_dir, "data", "test", f"{task_name}_test.csv"), + }, + ), + datasets.SplitGenerator( + name=datasets.Split.VALIDATION, + gen_kwargs={ + "filepath": os.path.join(data_dir, "data", "val", f"{task_name}_val.csv"), + }, + ), + datasets.SplitGenerator( + name=datasets.Split.TRAIN, + gen_kwargs={ + "filepath": os.path.join(data_dir, "data", "dev", f"{task_name}_dev.csv"), + }, + ), + ] + + def _generate_examples(self, filepath): + df = pd.read_csv(filepath, header=None) + df.columns = ["question", "A", "B", "C", "D", "answer"] + + yield from enumerate(df.to_dict(orient="records")) diff --git a/post-training/LLaMA-Factory/evaluation/mmlu/mmlu.zip b/post-training/LLaMA-Factory/evaluation/mmlu/mmlu.zip new file mode 100644 index 0000000..1aaee65 Binary files /dev/null and b/post-training/LLaMA-Factory/evaluation/mmlu/mmlu.zip differ diff --git a/post-training/LLaMA-Factory/examples/README.md b/post-training/LLaMA-Factory/examples/README.md new file mode 100644 index 0000000..457ec87 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/README.md @@ -0,0 +1,266 @@ +We provide diverse examples about fine-tuning LLMs. + +Make sure to execute these commands in the `LLaMA-Factory` directory. + +## Table of Contents + +- [LoRA Fine-Tuning](#lora-fine-tuning) +- [QLoRA Fine-Tuning](#qlora-fine-tuning) +- [Full-Parameter Fine-Tuning](#full-parameter-fine-tuning) +- [Merging LoRA Adapters and Quantization](#merging-lora-adapters-and-quantization) +- [Inferring LoRA Fine-Tuned Models](#inferring-lora-fine-tuned-models) +- [Extras](#extras) + +Use `CUDA_VISIBLE_DEVICES` (GPU) or `ASCEND_RT_VISIBLE_DEVICES` (NPU) to choose computing devices. + +By default, LLaMA-Factory uses all visible computing devices. + +## Examples + +### LoRA Fine-Tuning + +#### (Continuous) Pre-Training + +```bash +llamafactory-cli train examples/train_lora/llama3_lora_pretrain.yaml +``` + +#### Supervised Fine-Tuning + +```bash +llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml +``` + +#### Multimodal Supervised Fine-Tuning + +```bash +llamafactory-cli train examples/train_lora/llava1_5_lora_sft.yaml +llamafactory-cli train examples/train_lora/qwen2vl_lora_sft.yaml +``` + +#### DPO/ORPO/SimPO Training + +```bash +llamafactory-cli train examples/train_lora/llama3_lora_dpo.yaml +``` + +#### Multimodal DPO/ORPO/SimPO Training + +```bash +llamafactory-cli train examples/train_lora/qwen2vl_lora_dpo.yaml +``` + +#### Reward Modeling + +```bash +llamafactory-cli train examples/train_lora/llama3_lora_reward.yaml +``` + +#### PPO Training + +```bash +llamafactory-cli train examples/train_lora/llama3_lora_ppo.yaml +``` + +#### KTO Training + +```bash +llamafactory-cli train examples/train_lora/llama3_lora_kto.yaml +``` + +#### Preprocess Dataset + +It is useful for large dataset, use `tokenized_path` in config to load the preprocessed dataset. 
+ +```bash +llamafactory-cli train examples/train_lora/llama3_preprocess.yaml +``` + +#### Evaluating on MMLU/CMMLU/C-Eval Benchmarks + +```bash +llamafactory-cli eval examples/train_lora/llama3_lora_eval.yaml +``` + +#### Supervised Fine-Tuning on Multiple Nodes + +```bash +FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml +FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml +``` + +#### Supervised Fine-Tuning with DeepSpeed ZeRO-3 (Weight Sharding) + +```bash +FORCE_TORCHRUN=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ds3.yaml +``` + +#### Supervised Fine-Tuning with Ray on 4 GPUs + +```bash +USE_RAY=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ray.yaml +``` + +### QLoRA Fine-Tuning + +#### Supervised Fine-Tuning with 4/8-bit Bitsandbytes/HQQ/EETQ Quantization (Recommended) + +```bash +llamafactory-cli train examples/train_qlora/llama3_lora_sft_otfq.yaml +``` + +#### Supervised Fine-Tuning with 4-bit Bitsandbytes Quantization on Ascend NPU + +```bash +llamafactory-cli train examples/train_qlora/llama3_lora_sft_bnb_npu.yaml +``` + +#### Supervised Fine-Tuning with 4/8-bit GPTQ Quantization + +```bash +llamafactory-cli train examples/train_qlora/llama3_lora_sft_gptq.yaml +``` + +#### Supervised Fine-Tuning with 4-bit AWQ Quantization + +```bash +llamafactory-cli train examples/train_qlora/llama3_lora_sft_awq.yaml +``` + +#### Supervised Fine-Tuning with 2-bit AQLM Quantization + +```bash +llamafactory-cli train examples/train_qlora/llama3_lora_sft_aqlm.yaml +``` + +### Full-Parameter Fine-Tuning + +#### Supervised Fine-Tuning on Single Node + +```bash +FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/llama3_full_sft.yaml +``` + +#### Supervised Fine-Tuning on Multiple Nodes + +```bash +FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml +FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml +``` + +#### Multimodal Supervised Fine-Tuning + +```bash +FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/qwen2vl_full_sft.yaml +``` + +### Merging LoRA Adapters and Quantization + +#### Merge LoRA Adapters + +Note: DO NOT use quantized model or `quantization_bit` when merging LoRA adapters. 
+ +```bash +llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml +``` + +#### Quantizing Model using AutoGPTQ + +```bash +llamafactory-cli export examples/merge_lora/llama3_gptq.yaml +``` + +### Save Ollama modelfile + +```bash +llamafactory-cli export examples/merge_lora/llama3_full_sft.yaml +``` + +### Inferring LoRA Fine-Tuned Models + +#### Batch Generation using vLLM Tensor Parallel + +``` +python scripts/vllm_infer.py --model_name_or_path path_to_merged_model --dataset alpaca_en_demo +``` + +#### Use CLI ChatBox + +```bash +llamafactory-cli chat examples/inference/llama3_lora_sft.yaml +``` + +#### Use Web UI ChatBox + +```bash +llamafactory-cli webchat examples/inference/llama3_lora_sft.yaml +``` + +#### Launch OpenAI-style API + +```bash +llamafactory-cli api examples/inference/llama3_lora_sft.yaml +``` + +### Extras + +#### Full-Parameter Fine-Tuning using GaLore + +```bash +llamafactory-cli train examples/extras/galore/llama3_full_sft.yaml +``` + +#### Full-Parameter Fine-Tuning using APOLLO + +```bash +llamafactory-cli train examples/extras/apollo/llama3_full_sft.yaml +``` + +#### Full-Parameter Fine-Tuning using BAdam + +```bash +llamafactory-cli train examples/extras/badam/llama3_full_sft.yaml +``` + +#### Full-Parameter Fine-Tuning using Adam-mini + +```bash +llamafactory-cli train examples/extras/adam_mini/qwen2_full_sft.yaml +``` + +#### LoRA+ Fine-Tuning + +```bash +llamafactory-cli train examples/extras/loraplus/llama3_lora_sft.yaml +``` + +#### PiSSA Fine-Tuning + +```bash +llamafactory-cli train examples/extras/pissa/llama3_lora_sft.yaml +``` + +#### Mixture-of-Depths Fine-Tuning + +```bash +llamafactory-cli train examples/extras/mod/llama3_full_sft.yaml +``` + +#### LLaMA-Pro Fine-Tuning + +```bash +bash examples/extras/llama_pro/expand.sh +llamafactory-cli train examples/extras/llama_pro/llama3_freeze_sft.yaml +``` + +#### FSDP+QLoRA Fine-Tuning + +```bash +bash examples/extras/fsdp_qlora/train.sh +``` + +#### Computing BLEU and ROUGE Scores + +```bash +llamafactory-cli train examples/extras/nlg_eval/llama3_lora_predict.yaml +``` diff --git a/post-training/LLaMA-Factory/examples/README_zh.md b/post-training/LLaMA-Factory/examples/README_zh.md new file mode 100644 index 0000000..4899e27 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/README_zh.md @@ -0,0 +1,266 @@ +我们提供了多样化的大模型微调示例脚本。 + +请确保在 `LLaMA-Factory` 目录下执行下述命令。 + +## 目录 + +- [LoRA 微调](#lora-微调) +- [QLoRA 微调](#qlora-微调) +- [全参数微调](#全参数微调) +- [合并 LoRA 适配器与模型量化](#合并-lora-适配器与模型量化) +- [推理 LoRA 模型](#推理-lora-模型) +- [杂项](#杂项) + +使用 `CUDA_VISIBLE_DEVICES`(GPU)或 `ASCEND_RT_VISIBLE_DEVICES`(NPU)选择计算设备。 + +LLaMA-Factory 默认使用所有可见的计算设备。 + +## 示例 + +### LoRA 微调 + +#### (增量)预训练 + +```bash +llamafactory-cli train examples/train_lora/llama3_lora_pretrain.yaml +``` + +#### 指令监督微调 + +```bash +llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml +``` + +#### 多模态指令监督微调 + +```bash +llamafactory-cli train examples/train_lora/llava1_5_lora_sft.yaml +llamafactory-cli train examples/train_lora/qwen2vl_lora_sft.yaml +``` + +#### DPO/ORPO/SimPO 训练 + +```bash +llamafactory-cli train examples/train_lora/llama3_lora_dpo.yaml +``` + +#### 多模态 DPO/ORPO/SimPO 训练 + +```bash +llamafactory-cli train examples/train_lora/qwen2vl_lora_dpo.yaml +``` + +#### 奖励模型训练 + +```bash +llamafactory-cli train examples/train_lora/llama3_lora_reward.yaml +``` + +#### PPO 训练 + +```bash +llamafactory-cli train examples/train_lora/llama3_lora_ppo.yaml +``` + +#### KTO 训练 + +```bash +llamafactory-cli train 
examples/train_lora/llama3_lora_kto.yaml +``` + +#### 预处理数据集 + +对于大数据集有帮助,在配置中使用 `tokenized_path` 以加载预处理后的数据集。 + +```bash +llamafactory-cli train examples/train_lora/llama3_preprocess.yaml +``` + +#### 在 MMLU/CMMLU/C-Eval 上评估 + +```bash +llamafactory-cli eval examples/train_lora/llama3_lora_eval.yaml +``` + +#### 多机指令监督微调 + +```bash +FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml +FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml +``` + +#### 使用 DeepSpeed ZeRO-3 平均分配显存 + +```bash +FORCE_TORCHRUN=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ds3.yaml +``` + +#### 使用 Ray 在 4 张 GPU 上微调 + +```bash +USE_RAY=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ray.yaml +``` + +### QLoRA 微调 + +#### 基于 4/8 比特 Bitsandbytes/HQQ/EETQ 量化进行指令监督微调(推荐) + +```bash +llamafactory-cli train examples/train_qlora/llama3_lora_sft_otfq.yaml +``` + +#### 在 NPU 上基于 4 比特 Bitsandbytes 量化进行指令监督微调 + +```bash +llamafactory-cli train examples/train_qlora/llama3_lora_sft_bnb_npu.yaml +``` + +#### 基于 4/8 比特 GPTQ 量化进行指令监督微调 + +```bash +llamafactory-cli train examples/train_qlora/llama3_lora_sft_gptq.yaml +``` + +#### 基于 4 比特 AWQ 量化进行指令监督微调 + +```bash +llamafactory-cli train examples/train_qlora/llama3_lora_sft_awq.yaml +``` + +#### 基于 2 比特 AQLM 量化进行指令监督微调 + +```bash +llamafactory-cli train examples/train_qlora/llama3_lora_sft_aqlm.yaml +``` + +### 全参数微调 + +#### 在单机上进行指令监督微调 + +```bash +FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/llama3_full_sft.yaml +``` + +#### 在多机上进行指令监督微调 + +```bash +FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml +FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml +``` + +#### 多模态指令监督微调 + +```bash +FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/qwen2vl_full_sft.yaml +``` + +### 合并 LoRA 适配器与模型量化 + +#### 合并 LoRA 适配器 + +注:请勿使用量化后的模型或 `quantization_bit` 参数来合并 LoRA 适配器。 + +```bash +llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml +``` + +#### 使用 AutoGPTQ 量化模型 + +```bash +llamafactory-cli export examples/merge_lora/llama3_gptq.yaml +``` + +### 保存 Ollama 配置文件 + +```bash +llamafactory-cli export examples/merge_lora/llama3_full_sft.yaml +``` + +### 推理 LoRA 模型 + +#### 使用 vLLM+TP 批量推理 + +``` +python scripts/vllm_infer.py --model_name_or_path path_to_merged_model --dataset alpaca_en_demo +``` + +#### 使用命令行对话框 + +```bash +llamafactory-cli chat examples/inference/llama3_lora_sft.yaml +``` + +#### 使用浏览器对话框 + +```bash +llamafactory-cli webchat examples/inference/llama3_lora_sft.yaml +``` + +#### 启动 OpenAI 风格 API + +```bash +llamafactory-cli api examples/inference/llama3_lora_sft.yaml +``` + +### 杂项 + +#### 使用 GaLore 进行全参数训练 + +```bash +llamafactory-cli train examples/extras/galore/llama3_full_sft.yaml +``` + +#### 使用 APOLLO 进行全参数训练 + +```bash +llamafactory-cli train examples/extras/apollo/llama3_full_sft.yaml +``` + +#### 使用 BAdam 进行全参数训练 + +```bash +llamafactory-cli train examples/extras/badam/llama3_full_sft.yaml +``` + +#### 使用 Adam-mini 进行全参数训练 + +```bash +llamafactory-cli train examples/extras/adam_mini/qwen2_full_sft.yaml +``` + +#### LoRA+ 微调 + +```bash +llamafactory-cli train examples/extras/loraplus/llama3_lora_sft.yaml +``` + +#### PiSSA 微调 + +```bash +llamafactory-cli 
train examples/extras/pissa/llama3_lora_sft.yaml +``` + +#### 深度混合微调 + +```bash +llamafactory-cli train examples/extras/mod/llama3_full_sft.yaml +``` + +#### LLaMA-Pro 微调 + +```bash +bash examples/extras/llama_pro/expand.sh +llamafactory-cli train examples/extras/llama_pro/llama3_freeze_sft.yaml +``` + +#### FSDP+QLoRA 微调 + +```bash +bash examples/extras/fsdp_qlora/train.sh +``` + +#### 计算 BLEU 和 ROUGE 分数 + +```bash +llamafactory-cli train examples/extras/nlg_eval/llama3_lora_predict.yaml +``` diff --git a/post-training/LLaMA-Factory/examples/accelerate/fsdp_config.yaml b/post-training/LLaMA-Factory/examples/accelerate/fsdp_config.yaml new file mode 100644 index 0000000..09d2f5d --- /dev/null +++ b/post-training/LLaMA-Factory/examples/accelerate/fsdp_config.yaml @@ -0,0 +1,25 @@ +compute_environment: LOCAL_MACHINE +debug: false +distributed_type: FSDP +downcast_bf16: 'no' +fsdp_config: + fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP + fsdp_backward_prefetch: BACKWARD_PRE + fsdp_forward_prefetch: false + fsdp_cpu_ram_efficient_loading: true + fsdp_offload_params: false + fsdp_sharding_strategy: FULL_SHARD + fsdp_state_dict_type: FULL_STATE_DICT + fsdp_sync_module_states: true + fsdp_use_orig_params: true +machine_rank: 0 +main_training_function: main +mixed_precision: bf16 # or fp16 +num_machines: 1 # the number of nodes +num_processes: 2 # the number of GPUs in all nodes +rdzv_backend: static +same_network: true +tpu_env: [] +tpu_use_cluster: false +tpu_use_sudo: false +use_cpu: false diff --git a/post-training/LLaMA-Factory/examples/accelerate/fsdp_config_offload.yaml b/post-training/LLaMA-Factory/examples/accelerate/fsdp_config_offload.yaml new file mode 100644 index 0000000..a55e652 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/accelerate/fsdp_config_offload.yaml @@ -0,0 +1,25 @@ +compute_environment: LOCAL_MACHINE +debug: false +distributed_type: FSDP +downcast_bf16: 'no' +fsdp_config: + fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP + fsdp_backward_prefetch: BACKWARD_PRE + fsdp_forward_prefetch: false + fsdp_cpu_ram_efficient_loading: true + fsdp_offload_params: true # offload may affect training speed + fsdp_sharding_strategy: FULL_SHARD + fsdp_state_dict_type: FULL_STATE_DICT + fsdp_sync_module_states: true + fsdp_use_orig_params: true +machine_rank: 0 +main_training_function: main +mixed_precision: bf16 # or fp16 +num_machines: 1 # the number of nodes +num_processes: 2 # the number of GPUs in all nodes +rdzv_backend: static +same_network: true +tpu_env: [] +tpu_use_cluster: false +tpu_use_sudo: false +use_cpu: false diff --git a/post-training/LLaMA-Factory/examples/deepspeed/ds_z0_config.json b/post-training/LLaMA-Factory/examples/deepspeed/ds_z0_config.json new file mode 100644 index 0000000..8ac9918 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/deepspeed/ds_z0_config.json @@ -0,0 +1,28 @@ +{ + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "zero_allow_untested_optimizer": true, + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "bf16": { + "enabled": "auto" + }, + "zero_optimization": { + "stage": 0, + "allgather_partitions": true, + "allgather_bucket_size": 5e8, + "overlap_comm": false, + "reduce_scatter": true, + "reduce_bucket_size": 5e8, + "contiguous_gradients": true, + "round_robin_gradients": true + } +} diff --git 
a/post-training/LLaMA-Factory/examples/deepspeed/ds_z2_config.json b/post-training/LLaMA-Factory/examples/deepspeed/ds_z2_config.json new file mode 100644 index 0000000..c4177e5 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/deepspeed/ds_z2_config.json @@ -0,0 +1,28 @@ +{ + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "zero_allow_untested_optimizer": true, + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "bf16": { + "enabled": "auto" + }, + "zero_optimization": { + "stage": 2, + "allgather_partitions": true, + "allgather_bucket_size": 5e8, + "overlap_comm": false, + "reduce_scatter": true, + "reduce_bucket_size": 5e8, + "contiguous_gradients": true, + "round_robin_gradients": true + } +} diff --git a/post-training/LLaMA-Factory/examples/deepspeed/ds_z2_offload_config.json b/post-training/LLaMA-Factory/examples/deepspeed/ds_z2_offload_config.json new file mode 100644 index 0000000..7550472 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/deepspeed/ds_z2_offload_config.json @@ -0,0 +1,32 @@ +{ + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "zero_allow_untested_optimizer": true, + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "bf16": { + "enabled": "auto" + }, + "zero_optimization": { + "stage": 2, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "allgather_partitions": true, + "allgather_bucket_size": 5e8, + "overlap_comm": false, + "reduce_scatter": true, + "reduce_bucket_size": 5e8, + "contiguous_gradients": true, + "round_robin_gradients": true + } +} diff --git a/post-training/LLaMA-Factory/examples/deepspeed/ds_z3_config.json b/post-training/LLaMA-Factory/examples/deepspeed/ds_z3_config.json new file mode 100644 index 0000000..46584a7 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/deepspeed/ds_z3_config.json @@ -0,0 +1,30 @@ +{ + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "zero_allow_untested_optimizer": true, + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "bf16": { + "enabled": "auto" + }, + "zero_optimization": { + "stage": 3, + "overlap_comm": false, + "contiguous_gradients": true, + "sub_group_size": 1e9, + "reduce_bucket_size": "auto", + "stage3_prefetch_bucket_size": "auto", + "stage3_param_persistence_threshold": "auto", + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": true + } +} diff --git a/post-training/LLaMA-Factory/examples/deepspeed/ds_z3_offload_config.json b/post-training/LLaMA-Factory/examples/deepspeed/ds_z3_offload_config.json new file mode 100644 index 0000000..0fabebb --- /dev/null +++ b/post-training/LLaMA-Factory/examples/deepspeed/ds_z3_offload_config.json @@ -0,0 +1,38 @@ +{ + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "zero_allow_untested_optimizer": true, + "fp16": { + "enabled": "auto", + "loss_scale": 0, + 
"loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "bf16": { + "enabled": "auto" + }, + "zero_optimization": { + "stage": 3, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "offload_param": { + "device": "cpu", + "pin_memory": true + }, + "overlap_comm": false, + "contiguous_gradients": true, + "sub_group_size": 1e9, + "reduce_bucket_size": "auto", + "stage3_prefetch_bucket_size": "auto", + "stage3_param_persistence_threshold": "auto", + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": true + } +} diff --git a/post-training/LLaMA-Factory/examples/extras/adam_mini/qwen2_full_sft.yaml b/post-training/LLaMA-Factory/examples/extras/adam_mini/qwen2_full_sft.yaml new file mode 100644 index 0000000..79df9a7 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/extras/adam_mini/qwen2_full_sft.yaml @@ -0,0 +1,43 @@ +### model +model_name_or_path: Qwen/Qwen2-1.5B-Instruct +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: full +use_adam_mini: true + +### dataset +dataset: identity,alpaca_en_demo +template: qwen +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/qwen2-1_5b/full/sft +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-5 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 + +### eval +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/extras/apollo/llama3_full_sft.yaml b/post-training/LLaMA-Factory/examples/extras/apollo/llama3_full_sft.yaml new file mode 100644 index 0000000..d9fb6c2 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/extras/apollo/llama3_full_sft.yaml @@ -0,0 +1,48 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: full +use_apollo: true +apollo_layerwise: true # choices: [true, false], use false for DDP training +apollo_target: all +apollo_rank: 128 +apollo_scale: 32.0 +apollo_scale_type: channel + +### dataset +dataset: identity,alpaca_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b/full/sft +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 1 # use 1 for layerwise apollo +learning_rate: 1.0e-5 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +pure_bf16: true +ddp_timeout: 180000000 + +### eval +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/extras/badam/llama3_full_sft.yaml b/post-training/LLaMA-Factory/examples/extras/badam/llama3_full_sft.yaml new file mode 100644 index 0000000..7ce3323 --- /dev/null +++ 
b/post-training/LLaMA-Factory/examples/extras/badam/llama3_full_sft.yaml @@ -0,0 +1,46 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: full +use_badam: true +badam_mode: layer +badam_switch_mode: ascending +badam_switch_interval: 50 +badam_verbose: 2 +# deepspeed: examples/deepspeed/ds_z3_config.json + +### dataset +dataset: identity,alpaca_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b/full/sft +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-5 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 + +### eval +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/extras/fsdp_qlora/llama3_lora_sft.yaml b/post-training/LLaMA-Factory/examples/extras/fsdp_qlora/llama3_lora_sft.yaml new file mode 100644 index 0000000..1a8d974 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/extras/fsdp_qlora/llama3_lora_sft.yaml @@ -0,0 +1,45 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +quantization_bit: 4 +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all + +### dataset +dataset: identity,alpaca_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b/lora/sft +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 + +### eval +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/extras/fsdp_qlora/train.sh b/post-training/LLaMA-Factory/examples/extras/fsdp_qlora/train.sh new file mode 100644 index 0000000..fac8cde --- /dev/null +++ b/post-training/LLaMA-Factory/examples/extras/fsdp_qlora/train.sh @@ -0,0 +1,6 @@ +#!/bin/bash +# DO NOT use GPTQ/AWQ model in FSDP+QLoRA + +CUDA_VISIBLE_DEVICES=0,1 accelerate launch \ + --config_file examples/accelerate/fsdp_config.yaml \ + src/train.py examples/extras/fsdp_qlora/llama3_lora_sft.yaml diff --git a/post-training/LLaMA-Factory/examples/extras/galore/llama3_full_sft.yaml b/post-training/LLaMA-Factory/examples/extras/galore/llama3_full_sft.yaml new file mode 100644 index 0000000..9973093 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/extras/galore/llama3_full_sft.yaml @@ -0,0 +1,47 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: full +use_galore: true +galore_layerwise: true # choices: [true, false], use false for DDP training +galore_target: all +galore_rank: 128 +galore_scale: 2.0 + +### dataset +dataset: identity,alpaca_en_demo +template: llama3 
+cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b/full/sft +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 1 # use 1 for layerwise galore +learning_rate: 1.0e-5 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +pure_bf16: true +ddp_timeout: 180000000 + +### eval +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/extras/llama_pro/expand.sh b/post-training/LLaMA-Factory/examples/extras/llama_pro/expand.sh new file mode 100644 index 0000000..9f3c013 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/extras/llama_pro/expand.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +python scripts/llama_pro.py \ + --model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct \ + --output_dir models/llama3-8b-pro \ + --num_expand 8 diff --git a/post-training/LLaMA-Factory/examples/extras/llama_pro/llama3_freeze_sft.yaml b/post-training/LLaMA-Factory/examples/extras/llama_pro/llama3_freeze_sft.yaml new file mode 100644 index 0000000..6c5efb8 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/extras/llama_pro/llama3_freeze_sft.yaml @@ -0,0 +1,45 @@ +### model +model_name_or_path: models/llama3-8b-pro +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: freeze +freeze_trainable_layers: 8 +freeze_trainable_modules: all +use_llama_pro: true + +### dataset +dataset: identity,alpaca_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b-pro/freeze/sft +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 + +### eval +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/extras/loraplus/llama3_lora_sft.yaml b/post-training/LLaMA-Factory/examples/extras/loraplus/llama3_lora_sft.yaml new file mode 100644 index 0000000..574b487 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/extras/loraplus/llama3_lora_sft.yaml @@ -0,0 +1,45 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all +loraplus_lr_ratio: 16.0 + +### dataset +dataset: identity,alpaca_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b/lora/sft +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true 
+ddp_timeout: 180000000 + +### eval +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/extras/mod/llama3_full_sft.yaml b/post-training/LLaMA-Factory/examples/extras/mod/llama3_full_sft.yaml new file mode 100644 index 0000000..ed784e7 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/extras/mod/llama3_full_sft.yaml @@ -0,0 +1,44 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: full +mixture_of_depths: convert + +### dataset +dataset: identity,alpaca_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b-mod/full/sft +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +optim: paged_adamw_8bit +learning_rate: 1.0e-5 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +pure_bf16: true +ddp_timeout: 180000000 + +### eval +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/extras/nlg_eval/llama3_lora_predict.yaml b/post-training/LLaMA-Factory/examples/extras/nlg_eval/llama3_lora_predict.yaml new file mode 100644 index 0000000..be51c2e --- /dev/null +++ b/post-training/LLaMA-Factory/examples/extras/nlg_eval/llama3_lora_predict.yaml @@ -0,0 +1,31 @@ +# The batch generation can be SLOW using this config. +# For faster inference, we recommend to use `scripts/vllm_infer.py`. 
+ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +adapter_name_or_path: saves/llama3-8b/lora/sft +trust_remote_code: true + +### method +stage: sft +do_predict: true +finetuning_type: lora + +### dataset +eval_dataset: identity,alpaca_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 50 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b/lora/predict +overwrite_output_dir: true +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### eval +per_device_eval_batch_size: 1 +predict_with_generate: true +ddp_timeout: 180000000 diff --git a/post-training/LLaMA-Factory/examples/extras/pissa/init.sh b/post-training/LLaMA-Factory/examples/extras/pissa/init.sh new file mode 100644 index 0000000..11e1e35 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/extras/pissa/init.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +python scripts/pissa_init.py \ + --model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct \ + --output_dir models/llama3-8b-pissa diff --git a/post-training/LLaMA-Factory/examples/extras/pissa/llama3_lora_sft.yaml b/post-training/LLaMA-Factory/examples/extras/pissa/llama3_lora_sft.yaml new file mode 100644 index 0000000..1668343 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/extras/pissa/llama3_lora_sft.yaml @@ -0,0 +1,47 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all +pissa_init: true +pissa_iter: 16 +pissa_convert: true + +### dataset +dataset: identity,alpaca_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b/lora/sft +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 + +### eval +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/inference/llama3.yaml b/post-training/LLaMA-Factory/examples/inference/llama3.yaml new file mode 100644 index 0000000..2851e9a --- /dev/null +++ b/post-training/LLaMA-Factory/examples/inference/llama3.yaml @@ -0,0 +1,4 @@ +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +template: llama3 +infer_backend: huggingface # choices: [huggingface, vllm] +trust_remote_code: true diff --git a/post-training/LLaMA-Factory/examples/inference/llama3_full_sft.yaml b/post-training/LLaMA-Factory/examples/inference/llama3_full_sft.yaml new file mode 100644 index 0000000..d4555ca --- /dev/null +++ b/post-training/LLaMA-Factory/examples/inference/llama3_full_sft.yaml @@ -0,0 +1,4 @@ +model_name_or_path: saves/llama3-8b/full/sft +template: llama3 +infer_backend: huggingface # choices: [huggingface, vllm] +trust_remote_code: true diff --git a/post-training/LLaMA-Factory/examples/inference/llama3_lora_sft.yaml b/post-training/LLaMA-Factory/examples/inference/llama3_lora_sft.yaml new file mode 100644 index 0000000..7796c52 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/inference/llama3_lora_sft.yaml @@ -0,0 +1,5 @@ +model_name_or_path: 
meta-llama/Meta-Llama-3-8B-Instruct +adapter_name_or_path: saves/llama3-8b/lora/sft +template: llama3 +infer_backend: huggingface # choices: [huggingface, vllm] +trust_remote_code: true diff --git a/post-training/LLaMA-Factory/examples/inference/llama3_sglang.yaml b/post-training/LLaMA-Factory/examples/inference/llama3_sglang.yaml new file mode 100644 index 0000000..8241898 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/inference/llama3_sglang.yaml @@ -0,0 +1,4 @@ +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +template: llama3 +infer_backend: sglang +trust_remote_code: true diff --git a/post-training/LLaMA-Factory/examples/inference/llama3_vllm.yaml b/post-training/LLaMA-Factory/examples/inference/llama3_vllm.yaml new file mode 100644 index 0000000..4379956 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/inference/llama3_vllm.yaml @@ -0,0 +1,5 @@ +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +template: llama3 +infer_backend: vllm +vllm_enforce_eager: true +trust_remote_code: true diff --git a/post-training/LLaMA-Factory/examples/inference/llava1_5.yaml b/post-training/LLaMA-Factory/examples/inference/llava1_5.yaml new file mode 100644 index 0000000..2e934dd --- /dev/null +++ b/post-training/LLaMA-Factory/examples/inference/llava1_5.yaml @@ -0,0 +1,4 @@ +model_name_or_path: llava-hf/llava-1.5-7b-hf +template: llava +infer_backend: huggingface # choices: [huggingface, vllm] +trust_remote_code: true diff --git a/post-training/LLaMA-Factory/examples/inference/qwen2_vl.yaml b/post-training/LLaMA-Factory/examples/inference/qwen2_vl.yaml new file mode 100644 index 0000000..b5eabc6 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/inference/qwen2_vl.yaml @@ -0,0 +1,4 @@ +model_name_or_path: Qwen/Qwen2-VL-7B-Instruct +template: qwen2_vl +infer_backend: huggingface # choices: [huggingface, vllm] +trust_remote_code: true diff --git a/post-training/LLaMA-Factory/examples/merge_lora/llama3_full_sft.yaml b/post-training/LLaMA-Factory/examples/merge_lora/llama3_full_sft.yaml new file mode 100644 index 0000000..4e329fa --- /dev/null +++ b/post-training/LLaMA-Factory/examples/merge_lora/llama3_full_sft.yaml @@ -0,0 +1,10 @@ +### model +model_name_or_path: saves/llama3-8b/full/sft +template: llama3 +trust_remote_code: true + +### export +export_dir: output/llama3_full_sft +export_size: 5 +export_device: cpu +export_legacy_format: false diff --git a/post-training/LLaMA-Factory/examples/merge_lora/llama3_gptq.yaml b/post-training/LLaMA-Factory/examples/merge_lora/llama3_gptq.yaml new file mode 100644 index 0000000..3a2d909 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/merge_lora/llama3_gptq.yaml @@ -0,0 +1,12 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +template: llama3 +trust_remote_code: true + +### export +export_dir: output/llama3_gptq +export_quantization_bit: 4 +export_quantization_dataset: data/c4_demo.json +export_size: 5 +export_device: cpu +export_legacy_format: false diff --git a/post-training/LLaMA-Factory/examples/merge_lora/llama3_lora_sft.yaml b/post-training/LLaMA-Factory/examples/merge_lora/llama3_lora_sft.yaml new file mode 100644 index 0000000..97bb457 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/merge_lora/llama3_lora_sft.yaml @@ -0,0 +1,13 @@ +### Note: DO NOT use quantized model or quantization_bit when merging lora adapters + +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +adapter_name_or_path: saves/llama3-8b/lora/sft +template: llama3 +trust_remote_code: true + +### 
export +export_dir: output/llama3_lora_sft +export_size: 5 +export_device: cpu +export_legacy_format: false diff --git a/post-training/LLaMA-Factory/examples/merge_lora/qwen2vl_lora_sft.yaml b/post-training/LLaMA-Factory/examples/merge_lora/qwen2vl_lora_sft.yaml new file mode 100644 index 0000000..103dbcd --- /dev/null +++ b/post-training/LLaMA-Factory/examples/merge_lora/qwen2vl_lora_sft.yaml @@ -0,0 +1,13 @@ +### Note: DO NOT use quantized model or quantization_bit when merging lora adapters + +### model +model_name_or_path: Qwen/Qwen2-VL-7B-Instruct +adapter_name_or_path: saves/qwen2_vl-7b/lora/sft +template: qwen2_vl +trust_remote_code: true + +### export +export_dir: output/qwen2_vl_lora_sft +export_size: 5 +export_device: cpu +export_legacy_format: false diff --git a/post-training/LLaMA-Factory/examples/train_full/llama3_full_sft.yaml b/post-training/LLaMA-Factory/examples/train_full/llama3_full_sft.yaml new file mode 100644 index 0000000..fb7066a --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_full/llama3_full_sft.yaml @@ -0,0 +1,45 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: full +deepspeed: examples/deepspeed/ds_z3_config.json # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json] + +### dataset +dataset: identity,alpaca_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b/full/sft +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 2 +learning_rate: 1.0e-5 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +resume_from_checkpoint: null + +### eval +# eval_dataset: alpaca_en_demo +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/train_full/qwen2vl_full_sft_3b.yaml b/post-training/LLaMA-Factory/examples/train_full/qwen2vl_full_sft_3b.yaml new file mode 100644 index 0000000..f9773f3 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_full/qwen2vl_full_sft_3b.yaml @@ -0,0 +1,49 @@ +### model +model_name_or_path: Qwen/Qwen2.5-VL-3B-Instruct +image_max_pixels: 1843200 # 1280*720*2 +video_max_pixels: 16384 +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: full +# freeze_trainable_layers: 1 +# freeze_trainable_modules: all +freeze_vision_tower: true # choices: [true, false] +freeze_multi_modal_projector: false # choices: [true, false] +freeze_language_model: false +deepspeed: examples/deepspeed/ds_z3_config.json # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json] +# deepspeed: examples/deepspeed/ds_z3_offload_config.json + +### dataset +dataset: websight_toy +template: qwen2_vl +cutoff_len: 8192 +max_samples: 9000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: saves/websight/toy/3b +# output_dir: saves/mrweb/fulll +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 2 +learning_rate: 1.0e-5 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true 
+ddp_timeout: 180000000 + +### eval +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/train_full/qwen2vl_full_sft_7b.yaml b/post-training/LLaMA-Factory/examples/train_full/qwen2vl_full_sft_7b.yaml new file mode 100644 index 0000000..9f46674 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_full/qwen2vl_full_sft_7b.yaml @@ -0,0 +1,49 @@ +### model +model_name_or_path: Qwen/Qwen2.5-VL-7B-Instruct +image_max_pixels: 1843200 # 1280*720*2 +video_max_pixels: 16384 +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: full +# freeze_trainable_layers: 1 +# freeze_trainable_modules: all +freeze_vision_tower: true # choices: [true, false] +freeze_multi_modal_projector: false # choices: [true, false] +train_mm_proj_only: false # choices: [true, false] +deepspeed: examples/deepspeed/ds_z3_config.json # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json] +# deepspeed: examples/deepspeed/ds_z3_offload_config.json + +### dataset +dataset: websight +template: qwen2_vl +cutoff_len: 8192 +max_samples: 9000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: saves/websight/full/7b +# output_dir: saves/mrweb/fulll +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 2 +learning_rate: 1.0e-5 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 + +### eval +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_dpo.yaml b/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_dpo.yaml new file mode 100644 index 0000000..fd8c042 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_dpo.yaml @@ -0,0 +1,48 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +trust_remote_code: true + +### method +stage: dpo +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all +pref_beta: 0.1 +pref_loss: sigmoid # choices: [sigmoid (dpo), orpo, simpo] + +### dataset +dataset: dpo_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b/lora/dpo +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 5.0e-6 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +resume_from_checkpoint: null + +### eval +# eval_dataset: dpo_en_demo +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_eval.yaml b/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_eval.yaml new file mode 100644 index 0000000..60d7c2f --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_eval.yaml @@ -0,0 +1,19 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +adapter_name_or_path: saves/llama3-8b/lora/sft +trust_remote_code: true + +### method +finetuning_type: lora + +### dataset +task: mmlu_test # choices: 
[mmlu_test, ceval_validation, cmmlu_test] +template: fewshot +lang: en +n_shot: 5 + +### output +save_dir: saves/llama3-8b/lora/eval + +### eval +batch_size: 4 diff --git a/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_kto.yaml b/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_kto.yaml new file mode 100644 index 0000000..113b912 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_kto.yaml @@ -0,0 +1,44 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +trust_remote_code: true + +### method +stage: kto +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all +pref_beta: 0.1 + +### dataset +dataset: kto_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b/lora/kto +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 5.0e-6 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 + +### eval +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_ppo.yaml b/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_ppo.yaml new file mode 100644 index 0000000..8794481 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_ppo.yaml @@ -0,0 +1,43 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +reward_model: saves/llama3-8b/lora/reward +trust_remote_code: true + +### method +stage: ppo +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all + +### dataset +dataset: identity,alpaca_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b/lora/ppo +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-5 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 + +### generate +max_new_tokens: 512 +top_k: 0 +top_p: 0.9 diff --git a/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_pretrain.yaml b/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_pretrain.yaml new file mode 100644 index 0000000..3c851d7 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_pretrain.yaml @@ -0,0 +1,45 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +trust_remote_code: true + +### method +stage: pt +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all + +### dataset +dataset: c4_demo +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b/lora/pretrain +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 
+num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +resume_from_checkpoint: null + +### eval +# eval_dataset: c4_demo +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_reward.yaml b/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_reward.yaml new file mode 100644 index 0000000..48230b5 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_reward.yaml @@ -0,0 +1,46 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +trust_remote_code: true + +### method +stage: rm +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all + +### dataset +dataset: dpo_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b/lora/reward +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +resume_from_checkpoint: null + +### eval +# eval_dataset: dpo_en_demo +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_sft.yaml b/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_sft.yaml new file mode 100644 index 0000000..157d661 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_sft.yaml @@ -0,0 +1,46 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all + +### dataset +dataset: identity,alpaca_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b/lora/sft +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +resume_from_checkpoint: null + +### eval +# eval_dataset: alpaca_en_demo +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_sft_ds3.yaml b/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_sft_ds3.yaml new file mode 100644 index 0000000..e20b351 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_sft_ds3.yaml @@ -0,0 +1,47 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all +deepspeed: examples/deepspeed/ds_z3_config.json # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json] + +### dataset +dataset: identity,alpaca_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 1000 
+overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b/lora/sft +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 2 +learning_rate: 1.0e-4 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +resume_from_checkpoint: null + +### eval +# eval_dataset: alpaca_en_demo +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_sft_ray.yaml b/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_sft_ray.yaml new file mode 100644 index 0000000..8c03bf9 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_lora/llama3_lora_sft_ray.yaml @@ -0,0 +1,61 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct # or use local absolute path +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all + +### dataset +dataset: identity,alpaca_en_demo +dataset_dir: REMOTE:llamafactory/demo_data # or use local absolute path +template: llama3 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: tmp_dir +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### ray +ray_run_name: llama3_8b_sft_lora +ray_storage_path: ./saves +ray_num_workers: 4 # Number of GPUs to use. 
+placement_strategy: PACK +resources_per_worker: + GPU: 1 +# ray_init_kwargs: +# runtime_env: +# env_vars: +# : "" +# pip: +# - emoji + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +resume_from_checkpoint: null + +### eval +# eval_dataset: alpaca_en_demo +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/train_lora/llama3_preprocess.yaml b/post-training/LLaMA-Factory/examples/train_lora/llama3_preprocess.yaml new file mode 100644 index 0000000..fbaf01f --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_lora/llama3_preprocess.yaml @@ -0,0 +1,23 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all + +### dataset +dataset: identity,alpaca_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +tokenized_path: saves/llama3-8b/dataset/sft + +### output +output_dir: saves/llama3-8b/lora/sft +overwrite_output_dir: true diff --git a/post-training/LLaMA-Factory/examples/train_lora/llama4_lora_sft_ds3.yaml b/post-training/LLaMA-Factory/examples/train_lora/llama4_lora_sft_ds3.yaml new file mode 100644 index 0000000..6c5bb7b --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_lora/llama4_lora_sft_ds3.yaml @@ -0,0 +1,49 @@ +# pip install git+https://github.com/hiyouga/transformers.git@llama4_train + +### model +model_name_or_path: meta-llama/Llama-4-Scout-17B-16E-Instruct +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all +deepspeed: examples/deepspeed/ds_z3_config.json # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json] + +### dataset +dataset: mllm_demo,identity,alpaca_en_demo +template: llama4 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama4-8b/lora/sft +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 2 +learning_rate: 1.0e-4 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +resume_from_checkpoint: null + +### eval +# eval_dataset: alpaca_en_demo +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/train_lora/llava1_5_lora_sft.yaml b/post-training/LLaMA-Factory/examples/train_lora/llava1_5_lora_sft.yaml new file mode 100644 index 0000000..63cdcae --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_lora/llava1_5_lora_sft.yaml @@ -0,0 +1,45 @@ +### model +model_name_or_path: llava-hf/llava-1.5-7b-hf +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all + +### dataset +dataset: mllm_demo +template: llava +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llava1_5-7b/lora/sft +logging_steps: 10 +save_steps: 500 +plot_loss: 
true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +resume_from_checkpoint: null + +### eval +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/train_lora/qwen2vl_lora_dpo.yaml b/post-training/LLaMA-Factory/examples/train_lora/qwen2vl_lora_dpo.yaml new file mode 100644 index 0000000..3c990b4 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_lora/qwen2vl_lora_dpo.yaml @@ -0,0 +1,49 @@ +### model +model_name_or_path: Qwen/Qwen2-VL-7B-Instruct +image_max_pixels: 262144 +video_max_pixels: 16384 +trust_remote_code: true + +### method +stage: dpo +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all +pref_beta: 0.1 +pref_loss: sigmoid # choices: [sigmoid (dpo), orpo, simpo] + +### dataset +dataset: rlhf_v +template: qwen2_vl +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/qwen2_vl-7b/lora/dpo +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 5.0e-6 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +resume_from_checkpoint: null + +### eval +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/train_lora/qwen2vl_lora_sft.yaml b/post-training/LLaMA-Factory/examples/train_lora/qwen2vl_lora_sft.yaml new file mode 100644 index 0000000..54ff984 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_lora/qwen2vl_lora_sft.yaml @@ -0,0 +1,47 @@ +### model +model_name_or_path: Qwen/Qwen2-VL-7B-Instruct +image_max_pixels: 262144 +video_max_pixels: 16384 +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all + +### dataset +dataset: mllm_demo,identity,alpaca_en_demo # video: mllm_video_demo +template: qwen2_vl +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/qwen2_vl-7b/lora/sft +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +resume_from_checkpoint: null + +### eval +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/train_qlora/llama3_lora_sft_aqlm.yaml b/post-training/LLaMA-Factory/examples/train_qlora/llama3_lora_sft_aqlm.yaml new file mode 100644 index 0000000..a7d44c7 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_qlora/llama3_lora_sft_aqlm.yaml @@ -0,0 +1,44 @@ +### model +model_name_or_path: 
ISTA-DASLab/Meta-Llama-3-8B-Instruct-AQLM-2Bit-1x16 +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all + +### dataset +dataset: identity,alpaca_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b/lora/sft +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 + +### eval +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/train_qlora/llama3_lora_sft_awq.yaml b/post-training/LLaMA-Factory/examples/train_qlora/llama3_lora_sft_awq.yaml new file mode 100644 index 0000000..861edfd --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_qlora/llama3_lora_sft_awq.yaml @@ -0,0 +1,44 @@ +### model +model_name_or_path: TechxGenus/Meta-Llama-3-8B-Instruct-AWQ +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all + +### dataset +dataset: identity,alpaca_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b/lora/sft +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 + +### eval +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/train_qlora/llama3_lora_sft_bnb_npu.yaml b/post-training/LLaMA-Factory/examples/train_qlora/llama3_lora_sft_bnb_npu.yaml new file mode 100644 index 0000000..d68ce66 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_qlora/llama3_lora_sft_bnb_npu.yaml @@ -0,0 +1,47 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +quantization_bit: 4 +quantization_method: bnb +double_quantization: false +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all + +### dataset +dataset: identity,alpaca_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b/lora/sft +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 + +### eval +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/train_qlora/llama3_lora_sft_gptq.yaml 
b/post-training/LLaMA-Factory/examples/train_qlora/llama3_lora_sft_gptq.yaml new file mode 100644 index 0000000..729d862 --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_qlora/llama3_lora_sft_gptq.yaml @@ -0,0 +1,44 @@ +### model +model_name_or_path: TechxGenus/Meta-Llama-3-8B-Instruct-GPTQ +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all + +### dataset +dataset: identity,alpaca_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b/lora/sft +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 + +### eval +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/examples/train_qlora/llama3_lora_sft_otfq.yaml b/post-training/LLaMA-Factory/examples/train_qlora/llama3_lora_sft_otfq.yaml new file mode 100644 index 0000000..1a157af --- /dev/null +++ b/post-training/LLaMA-Factory/examples/train_qlora/llama3_lora_sft_otfq.yaml @@ -0,0 +1,46 @@ +### model +model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct +quantization_bit: 4 # choices: [8 (bnb/hqq/eetq), 4 (bnb/hqq), 3 (hqq), 2 (hqq)] +quantization_method: bnb # choices: [bnb, hqq, eetq] +trust_remote_code: true + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_rank: 8 +lora_target: all + +### dataset +dataset: identity,alpaca_en_demo +template: llama3 +cutoff_len: 2048 +max_samples: 1000 +overwrite_cache: true +preprocessing_num_workers: 16 +dataloader_num_workers: 4 + +### output +output_dir: saves/llama3-8b/lora/sft +logging_steps: 10 +save_steps: 500 +plot_loss: true +overwrite_output_dir: true +save_only_model: false +report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow] + +### train +per_device_train_batch_size: 1 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 3.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 + +### eval +# val_size: 0.1 +# per_device_eval_batch_size: 1 +# eval_strategy: steps +# eval_steps: 500 diff --git a/post-training/LLaMA-Factory/pyproject.toml b/post-training/LLaMA-Factory/pyproject.toml new file mode 100644 index 0000000..2585539 --- /dev/null +++ b/post-training/LLaMA-Factory/pyproject.toml @@ -0,0 +1,95 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llamafactory" +dynamic = [ + "version", + "dependencies", + "optional-dependencies", + "requires-python", + "scripts", + "authors", + "description", + "readme", + "license", + "keywords", + "classifiers" +] + +[tool.ruff] +target-version = "py39" +line-length = 119 +indent-width = 4 + +[tool.ruff.lint] +ignore = [ + "C408", # collection + "C901", # complex + "E501", # line too long + "E731", # lambda function + "E741", # ambiguous var name + "D100", # no doc public module + "D101", # no doc public class + "D102", # no doc public method + "D103", # no doc public function + "D104", # no doc public package + "D105", # no doc magic method + "D107", # no doc __init__ +] +extend-select = [ + 
"C", # complexity + "E", # error + "F", # pyflakes + "I", # isort + "W", # warning + "UP", # pyupgrade + "D", # pydocstyle + "PT009", # pytest assert + "RUF022", # sort __all__ +] + +[tool.ruff.lint.isort] +lines-after-imports = 2 +known-first-party = ["llamafactory"] +known-third-party = [ + "accelerate", + "datasets", + "gradio", + "numpy", + "peft", + "torch", + "transformers", + "trl", +] + +[tool.ruff.lint.pydocstyle] +convention = "google" + +[tool.ruff.format] +quote-style = "double" +indent-style = "space" +docstring-code-format = true +skip-magic-trailing-comma = false +line-ending = "auto" + +[tool.uv] +conflicts = [ + [ + { extra = "torch-npu" }, + { extra = "aqlm" }, + ], + [ + { extra = "torch-npu" }, + { extra = "liger-kernel" }, + ], + [ + { extra = "torch-npu" }, + { extra = "vllm" }, + ], + [ + { extra = "sglang" }, + { extra = "minicpm_v" }, + ], +] diff --git a/post-training/LLaMA-Factory/requirements.txt b/post-training/LLaMA-Factory/requirements.txt new file mode 100644 index 0000000..c818bb2 --- /dev/null +++ b/post-training/LLaMA-Factory/requirements.txt @@ -0,0 +1,25 @@ +transformers>=4.45.0,<=4.51.3,!=4.46.*,!=4.47.*,!=4.48.0 +datasets>=2.16.0,<=3.5.0 +accelerate>=0.34.0,<=1.6.0 +peft>=0.14.0,<=0.15.1 +trl>=0.8.6,<=0.9.6 +tokenizers>=0.19.0,<=0.21.1 +gradio>=4.38.0,<=5.25.0 +scipy +einops +sentencepiece +tiktoken +protobuf +uvicorn +fastapi +sse-starlette +matplotlib>=3.7.0 +fire +packaging +pyyaml +numpy<2.0.0 +pydantic<=2.10.6 +pandas>=2.0.0 +av +librosa +tyro<0.9.0 diff --git a/post-training/LLaMA-Factory/run_exp.sh b/post-training/LLaMA-Factory/run_exp.sh new file mode 100644 index 0000000..776eb9c --- /dev/null +++ b/post-training/LLaMA-Factory/run_exp.sh @@ -0,0 +1,2 @@ +set -x +FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/qwen2vl_full_sft_3b.yaml \ No newline at end of file diff --git a/post-training/LLaMA-Factory/scripts/api_example/test_image.py b/post-training/LLaMA-Factory/scripts/api_example/test_image.py new file mode 100644 index 0000000..afd2b69 --- /dev/null +++ b/post-training/LLaMA-Factory/scripts/api_example/test_image.py @@ -0,0 +1,65 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +from openai import OpenAI +from transformers.utils.versions import require_version + + +require_version("openai>=1.5.0", "To fix: pip install openai>=1.5.0") + + +def main(): + client = OpenAI( + api_key="{}".format(os.getenv("API_KEY", "0")), + base_url="http://localhost:{}/v1".format(os.getenv("API_PORT", 8000)), + ) + messages = [] + messages.append( + { + "role": "user", + "content": [ + {"type": "text", "text": "Output the color and number of each box."}, + { + "type": "image_url", + "image_url": {"url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-VL/boxes.png"}, + }, + ], + } + ) + result = client.chat.completions.create(messages=messages, model="test") + messages.append(result.choices[0].message) + print("Round 1:", result.choices[0].message.content) + # The image shows a pyramid of colored blocks with numbers on them. Here are the colors and numbers of ... + messages.append( + { + "role": "user", + "content": [ + {"type": "text", "text": "What kind of flower is this?"}, + { + "type": "image_url", + "image_url": {"url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-VL/flowers.jpg"}, + }, + ], + } + ) + result = client.chat.completions.create(messages=messages, model="test") + messages.append(result.choices[0].message) + print("Round 2:", result.choices[0].message.content) + # The image shows a cluster of forget-me-not flowers. Forget-me-nots are small ... + + +if __name__ == "__main__": + main() diff --git a/post-training/LLaMA-Factory/scripts/api_example/test_toolcall.py b/post-training/LLaMA-Factory/scripts/api_example/test_toolcall.py new file mode 100644 index 0000000..e291ba6 --- /dev/null +++ b/post-training/LLaMA-Factory/scripts/api_example/test_toolcall.py @@ -0,0 +1,77 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +import os + +from openai import OpenAI +from transformers.utils.versions import require_version + + +require_version("openai>=1.5.0", "To fix: pip install openai>=1.5.0") + + +def calculate_gpa(grades: list[str], hours: list[int]) -> float: + grade_to_score = {"A": 4, "B": 3, "C": 2} + total_score, total_hour = 0, 0 + for grade, hour in zip(grades, hours): + total_score += grade_to_score[grade] * hour + total_hour += hour + return round(total_score / total_hour, 2) + + +def main(): + client = OpenAI( + api_key="{}".format(os.getenv("API_KEY", "0")), + base_url="http://localhost:{}/v1".format(os.getenv("API_PORT", 8000)), + ) + tools = [ + { + "type": "function", + "function": { + "name": "calculate_gpa", + "description": "Calculate the Grade Point Average (GPA) based on grades and credit hours", + "parameters": { + "type": "object", + "properties": { + "grades": {"type": "array", "items": {"type": "string"}, "description": "The grades"}, + "hours": {"type": "array", "items": {"type": "integer"}, "description": "The credit hours"}, + }, + "required": ["grades", "hours"], + }, + }, + } + ] + tool_map = {"calculate_gpa": calculate_gpa} + + messages = [] + messages.append({"role": "user", "content": "My grades are A, A, B, and C. The credit hours are 3, 4, 3, and 2."}) + result = client.chat.completions.create(messages=messages, model="test", tools=tools) + if result.choices[0].message.tool_calls is None: + raise ValueError("Cannot retrieve function call from the response.") + + messages.append(result.choices[0].message) + tool_call = result.choices[0].message.tool_calls[0].function + print(tool_call) + # Function(arguments='{"grades": ["A", "A", "B", "C"], "hours": [3, 4, 3, 2]}', name='calculate_gpa') + name, arguments = tool_call.name, json.loads(tool_call.arguments) + tool_result = tool_map[name](**arguments) + messages.append({"role": "tool", "content": json.dumps({"gpa": tool_result}, ensure_ascii=False)}) + result = client.chat.completions.create(messages=messages, model="test", tools=tools) + print(result.choices[0].message.content) + # Based on the grades and credit hours you provided, your Grade Point Average (GPA) is 3.42. + + +if __name__ == "__main__": + main() diff --git a/post-training/LLaMA-Factory/scripts/convert_ckpt/llamafy_baichuan2.py b/post-training/LLaMA-Factory/scripts/convert_ckpt/llamafy_baichuan2.py new file mode 100644 index 0000000..3dbeff4 --- /dev/null +++ b/post-training/LLaMA-Factory/scripts/convert_ckpt/llamafy_baichuan2.py @@ -0,0 +1,112 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +import os +from collections import OrderedDict +from typing import Any + +import fire +import torch +from huggingface_hub import split_torch_state_dict_into_shards +from safetensors.torch import save_file +from tqdm import tqdm +from transformers.modeling_utils import SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME + + +CONFIG_NAME = "config.json" + + +def save_weight(input_dir: str, output_dir: str, shard_size: str, save_safetensors: bool): + baichuan2_state_dict: dict[str, torch.Tensor] = OrderedDict() + for filepath in tqdm(os.listdir(input_dir), desc="Load weights"): + if os.path.isfile(os.path.join(input_dir, filepath)) and filepath.endswith(".bin"): + shard_weight = torch.load(os.path.join(input_dir, filepath), map_location="cpu") + baichuan2_state_dict.update(shard_weight) + + llama_state_dict: dict[str, torch.Tensor] = OrderedDict() + for key, value in tqdm(baichuan2_state_dict.items(), desc="Convert format"): + if "W_pack" in key: + proj_size = value.size(0) // 3 + llama_state_dict[key.replace("W_pack", "q_proj")] = value[:proj_size, :] + llama_state_dict[key.replace("W_pack", "k_proj")] = value[proj_size : 2 * proj_size, :] + llama_state_dict[key.replace("W_pack", "v_proj")] = value[2 * proj_size :, :] + elif "lm_head" in key: + llama_state_dict[key] = torch.nn.functional.normalize(value) + else: + llama_state_dict[key] = value + + weights_name = SAFE_WEIGHTS_NAME if save_safetensors else WEIGHTS_NAME + filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors") + state_dict_split = split_torch_state_dict_into_shards( + llama_state_dict, filename_pattern=filename_pattern, max_shard_size=shard_size + ) + for shard_file, tensors in tqdm(state_dict_split.filename_to_tensors.items(), desc="Save weights"): + shard = {tensor: llama_state_dict[tensor].contiguous() for tensor in tensors} + if save_safetensors: + save_file(shard, os.path.join(output_dir, shard_file), metadata={"format": "pt"}) + else: + torch.save(shard, os.path.join(output_dir, shard_file)) + + if not state_dict_split.is_sharded: + print(f"Model weights saved in {os.path.join(output_dir, weights_name)}.") + else: + index = { + "metadata": state_dict_split.metadata, + "weight_map": state_dict_split.tensor_to_filename, + } + index_name = SAFE_WEIGHTS_INDEX_NAME if save_safetensors else WEIGHTS_INDEX_NAME + with open(os.path.join(output_dir, index_name), "w", encoding="utf-8") as f: + json.dump(index, f, indent=2, sort_keys=True) + + print(f"Model weights saved in {output_dir}.") + + +def save_config(input_dir: str, output_dir: str): + with open(os.path.join(input_dir, CONFIG_NAME), encoding="utf-8") as f: + llama2_config_dict: dict[str, Any] = json.load(f) + + llama2_config_dict["architectures"] = ["LlamaForCausalLM"] + llama2_config_dict.pop("auto_map", None) + llama2_config_dict.pop("tokenizer_class", None) + llama2_config_dict["model_type"] = "llama" + + with open(os.path.join(output_dir, CONFIG_NAME), "w", encoding="utf-8") as f: + json.dump(llama2_config_dict, f, indent=2) + + print(f"Model config saved in {os.path.join(output_dir, CONFIG_NAME)}") + + +def llamafy_baichuan2( + input_dir: str, + output_dir: str, + shard_size: str = "2GB", + save_safetensors: bool = True, +): + r"""Convert the Baichuan2-7B model in the same format as LLaMA2-7B. 
+ +    Usage: python llamafy_baichuan2.py --input_dir input --output_dir output +    Converted model: https://huggingface.co/hiyouga/Baichuan2-7B-Base-LLaMAfied +    """ +    try: +        os.makedirs(output_dir, exist_ok=False) +    except Exception as e: +        raise RuntimeError("Output dir already exists.") from e + +    save_weight(input_dir, output_dir, shard_size, save_safetensors) +    save_config(input_dir, output_dir) + + +if __name__ == "__main__": +    fire.Fire(llamafy_baichuan2) diff --git a/post-training/LLaMA-Factory/scripts/convert_ckpt/llamafy_qwen.py b/post-training/LLaMA-Factory/scripts/convert_ckpt/llamafy_qwen.py new file mode 100644 index 0000000..599b0f1 --- /dev/null +++ b/post-training/LLaMA-Factory/scripts/convert_ckpt/llamafy_qwen.py @@ -0,0 +1,165 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +#     http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +from collections import OrderedDict +from typing import Any + +import fire +import torch +from huggingface_hub import split_torch_state_dict_into_shards +from safetensors import safe_open +from safetensors.torch import save_file +from tqdm import tqdm +from transformers.modeling_utils import SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME +from transformers.utils import check_min_version + + +try: +    check_min_version("4.34.0") +except Exception: +    raise ValueError("Please upgrade `transformers` to 4.34.0") + + +CONFIG_NAME = "config.json" + + +def save_weight(input_dir: str, output_dir: str, shard_size: str, save_safetensors: bool) -> str: +    qwen_state_dict: dict[str, torch.Tensor] = OrderedDict() +    for filepath in tqdm(os.listdir(input_dir), desc="Load weights"): +        if os.path.isfile(os.path.join(input_dir, filepath)) and filepath.endswith(".safetensors"): +            with safe_open(os.path.join(input_dir, filepath), framework="pt", device="cpu") as f: +                for key in f.keys(): +                    qwen_state_dict[key] = f.get_tensor(key) + +    llama_state_dict: dict[str, torch.Tensor] = OrderedDict() +    torch_dtype = None +    for key, value in tqdm(qwen_state_dict.items(), desc="Convert format"): +        if torch_dtype is None: +            torch_dtype = value.dtype +        if "wte" in key: +            llama_state_dict["model.embed_tokens.weight"] = value +        elif "ln_f" in key: +            llama_state_dict["model.norm.weight"] = value +        else: +            key = key.replace("transformer.h", "model.layers") +            if "attn.c_attn" in key: +                proj_size = value.size(0) // 3 +                llama_state_dict[key.replace("attn.c_attn", "self_attn.q_proj")] = value[:proj_size, ...] +                llama_state_dict[key.replace("attn.c_attn", "self_attn.k_proj")] = value[ +                    proj_size : 2 * proj_size, ... +                ] +                llama_state_dict[key.replace("attn.c_attn", "self_attn.v_proj")] = value[2 * proj_size :, ...]
+ elif "attn.c_proj" in key: + llama_state_dict[key.replace("attn.c_proj", "self_attn.o_proj")] = value + llama_state_dict[key.replace("attn.c_proj.weight", "self_attn.o_proj.bias")] = torch.zeros_like( + value[:, 0] + ).squeeze() + elif "ln_1" in key: + llama_state_dict[key.replace("ln_1", "input_layernorm")] = value + elif "ln_2" in key: + llama_state_dict[key.replace("ln_2", "post_attention_layernorm")] = value + elif "mlp.w1" in key: + llama_state_dict[key.replace("mlp.w1", "mlp.up_proj")] = value + elif "mlp.w2" in key: + llama_state_dict[key.replace("mlp.w2", "mlp.gate_proj")] = value + elif "mlp.c_proj" in key: + llama_state_dict[key.replace("mlp.c_proj", "mlp.down_proj")] = value + elif "lm_head" in key: + llama_state_dict[key] = value + else: + raise KeyError(f"Unable to process key {key}") + + weights_name = SAFE_WEIGHTS_NAME if save_safetensors else WEIGHTS_NAME + filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors") + state_dict_split = split_torch_state_dict_into_shards( + llama_state_dict, filename_pattern=filename_pattern, max_shard_size=shard_size + ) + for shard_file, tensors in tqdm(state_dict_split.filename_to_tensors.items(), desc="Save weights"): + shard = {tensor: llama_state_dict[tensor].contiguous() for tensor in tensors} + if save_safetensors: + save_file(shard, os.path.join(output_dir, shard_file), metadata={"format": "pt"}) + else: + torch.save(shard, os.path.join(output_dir, shard_file)) + + if not state_dict_split.is_sharded: + print(f"Model weights saved in {os.path.join(output_dir, weights_name)}.") + else: + index = { + "metadata": state_dict_split.metadata, + "weight_map": state_dict_split.tensor_to_filename, + } + index_name = SAFE_WEIGHTS_INDEX_NAME if save_safetensors else WEIGHTS_INDEX_NAME + with open(os.path.join(output_dir, index_name), "w", encoding="utf-8") as f: + json.dump(index, f, indent=2, sort_keys=True) + + print(f"Model weights saved in {output_dir}.") + + return str(torch_dtype).replace("torch.", "") + + +def save_config(input_dir: str, output_dir: str, torch_dtype: str): + with open(os.path.join(input_dir, CONFIG_NAME), encoding="utf-8") as f: + qwen_config_dict: dict[str, Any] = json.load(f) + + llama2_config_dict: dict[str, Any] = OrderedDict() + llama2_config_dict["architectures"] = ["LlamaForCausalLM"] + llama2_config_dict["hidden_act"] = "silu" + llama2_config_dict["hidden_size"] = qwen_config_dict["hidden_size"] + llama2_config_dict["initializer_range"] = qwen_config_dict["initializer_range"] + llama2_config_dict["intermediate_size"] = qwen_config_dict["intermediate_size"] // 2 + llama2_config_dict["max_position_embeddings"] = qwen_config_dict["max_position_embeddings"] + llama2_config_dict["model_type"] = "llama" + llama2_config_dict["num_attention_heads"] = qwen_config_dict["num_attention_heads"] + llama2_config_dict["num_hidden_layers"] = qwen_config_dict["num_hidden_layers"] + llama2_config_dict["num_key_value_heads"] = qwen_config_dict["hidden_size"] // qwen_config_dict["kv_channels"] + llama2_config_dict["pretraining_tp"] = 1 + llama2_config_dict["rms_norm_eps"] = qwen_config_dict["layer_norm_epsilon"] + llama2_config_dict["rope_scaling"] = None + llama2_config_dict["tie_word_embeddings"] = qwen_config_dict["tie_word_embeddings"] + llama2_config_dict["torch_dtype"] = torch_dtype + llama2_config_dict["transformers_version"] = "4.34.0" + llama2_config_dict["use_cache"] = True + llama2_config_dict["vocab_size"] = qwen_config_dict["vocab_size"] + 
llama2_config_dict["attention_bias"] = True + +    with open(os.path.join(output_dir, CONFIG_NAME), "w", encoding="utf-8") as f: +        json.dump(llama2_config_dict, f, indent=2) + +    print(f"Model config saved in {os.path.join(output_dir, CONFIG_NAME)}") + + +def llamafy_qwen( +    input_dir: str, +    output_dir: str, +    shard_size: str = "2GB", +    save_safetensors: bool = False, +): +    r"""Convert the Qwen models in the same format as LLaMA2. + +    Usage: python llamafy_qwen.py --input_dir input --output_dir output +    Converted model: https://huggingface.co/hiyouga/Qwen-14B-Chat-LLaMAfied +    """ +    try: +        os.makedirs(output_dir, exist_ok=False) +    except Exception as e: +        raise RuntimeError("Output dir already exists.") from e + +    torch_dtype = save_weight(input_dir, output_dir, shard_size, save_safetensors) +    save_config(input_dir, output_dir, torch_dtype) + + +if __name__ == "__main__": +    fire.Fire(llamafy_qwen) diff --git a/post-training/LLaMA-Factory/scripts/convert_ckpt/tiny_llama4.py b/post-training/LLaMA-Factory/scripts/convert_ckpt/tiny_llama4.py new file mode 100644 index 0000000..2a96cfa --- /dev/null +++ b/post-training/LLaMA-Factory/scripts/convert_ckpt/tiny_llama4.py @@ -0,0 +1,39 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +#     http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from transformers import Llama4Config, Llama4ForConditionalGeneration, Llama4TextConfig, Llama4VisionConfig + + +if __name__ == "__main__": +    vision_config = Llama4VisionConfig( +        hidden_size=1408, +        image_size=336, +        intermediate_size=5632, +        num_attention_heads=16, +        num_hidden_layers=4, +        vision_output_dim=4096, +    ) +    text_config = Llama4TextConfig( +        hidden_size=512, +        intermediate_size=1024, +        intermediate_size_mlp=1024, +        num_hidden_layers=4, +        num_attention_heads=8, +        num_key_value_heads=2, +        head_dim=512 // 8, +        num_local_experts=2, +    ) +    config = Llama4Config(vision_config=vision_config, text_config=text_config) +    model = Llama4ForConditionalGeneration._from_config(config) +    model.save_pretrained("tiny-llama4") diff --git a/post-training/LLaMA-Factory/scripts/eval_bleu_rouge.py b/post-training/LLaMA-Factory/scripts/eval_bleu_rouge.py new file mode 100644 index 0000000..22e370b --- /dev/null +++ b/post-training/LLaMA-Factory/scripts/eval_bleu_rouge.py @@ -0,0 +1,79 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +#     http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +import json +import logging +import time + +import fire +from datasets import load_dataset + + +try: + import jieba # type: ignore + from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu # type: ignore + from rouge_chinese import Rouge # type: ignore + + jieba.setLogLevel(logging.CRITICAL) + jieba.initialize() +except ImportError: + print("Please install llamafactory with `pip install -e .[metrics]`.") + raise + + +def compute_metrics(sample): + hypothesis = list(jieba.cut(sample["predict"])) + reference = list(jieba.cut(sample["label"])) + + bleu_score = sentence_bleu( + [list(sample["label"])], + list(sample["predict"]), + smoothing_function=SmoothingFunction().method3, + ) + + if len(" ".join(hypothesis).split()) == 0 or len(" ".join(reference).split()) == 0: + result = {"rouge-1": {"f": 0.0}, "rouge-2": {"f": 0.0}, "rouge-l": {"f": 0.0}} + else: + rouge = Rouge() + scores = rouge.get_scores(" ".join(hypothesis), " ".join(reference)) + result = scores[0] + + metric_result = {} + for k, v in result.items(): + metric_result[k] = round(v["f"] * 100, 4) + + metric_result["bleu-4"] = round(bleu_score * 100, 4) + + return metric_result + + +def main(filename: str): + start_time = time.time() + dataset = load_dataset("json", data_files=filename, split="train") + dataset = dataset.map(compute_metrics, num_proc=8, remove_columns=dataset.column_names) + score_dict = dataset.to_dict() + + average_score = {} + for task, scores in sorted(score_dict.items(), key=lambda x: x[0]): + print(f"{task}: {sum(scores) / len(scores):.4f}") + average_score[task] = sum(scores) / len(scores) + + with open("predictions_score.json", "w", encoding="utf-8") as f: + json.dump(average_score, f, indent=4) + + print(f"\nDone in {time.time() - start_time:.3f}s.\nScore file saved to predictions_score.json") + + +if __name__ == "__main__": + fire.Fire(main) diff --git a/post-training/LLaMA-Factory/scripts/llama_pro.py b/post-training/LLaMA-Factory/scripts/llama_pro.py new file mode 100644 index 0000000..7e4b944 --- /dev/null +++ b/post-training/LLaMA-Factory/scripts/llama_pro.py @@ -0,0 +1,129 @@ +# Copyright 2025 Tencent Inc. and the LlamaFactory team. +# +# This code is inspired by the Tencent's LLaMA-Pro library. +# https://github.com/TencentARC/LLaMA-Pro/blob/main/scripts/block_expansion.py +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +import os +from collections import OrderedDict +from typing import TYPE_CHECKING + +import fire +import torch +from huggingface_hub import split_torch_state_dict_into_shards +from safetensors.torch import save_file +from tqdm import tqdm +from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, PreTrainedModel +from transformers.modeling_utils import SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME + + +if TYPE_CHECKING: + from transformers import PretrainedConfig + + +def change_name(name: str, old_index: int, new_index: int) -> str: + return name.replace(f".{old_index:d}.", f".{new_index:d}.") + + +def block_expansion( + model_name_or_path: str, + output_dir: str, + num_expand: int, + shard_size: str = "5GB", + save_safetensors: bool = True, +): + r"""Perform block expansion for LLaMA, Mistral, Qwen2 or Yi models. + + Usage: python llama_pro.py --model_name_or_path meta-llama/Llama-2-7b-hf --output_dir llama2_pro --num_expand 8 + """ + config: PretrainedConfig = AutoConfig.from_pretrained(model_name_or_path, trust_remote_code=True) + num_layers = getattr(config, "num_hidden_layers") + if num_layers % num_expand != 0: + raise ValueError(f"`num_layers` {num_layers} should be divisible by `num_expand` {num_expand}.") + + setattr(config, "num_hidden_layers", num_layers + num_expand) + config.save_pretrained(output_dir) + + tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True) + tokenizer.save_pretrained(output_dir) + + print(f"Expanding model of {num_layers} layers to {num_layers + num_expand} layers.") + model = AutoModelForCausalLM.from_pretrained( + model_name_or_path, torch_dtype="auto", device_map="cpu", trust_remote_code=True, low_cpu_mem_usage=True + ) + assert isinstance(model, PreTrainedModel) # type hint + if save_safetensors and getattr(model.config, "tie_word_embeddings", False): + del model.lm_head # safetensors does not allow shared weights + + split = num_layers // num_expand + layer_cnt = 0 + state_dict = model.state_dict() + output_state_dict: dict[str, torch.Tensor] = OrderedDict() + for i in range(num_layers): + for key, value in state_dict.items(): + if f".{i:d}." in key: + output_state_dict[change_name(key, i, layer_cnt)] = value + + print(f"Add layer {layer_cnt} copied from layer {i}.") + layer_cnt += 1 + if (i + 1) % split == 0: + for key, value in state_dict.items(): + if f".{i:d}." 
in key: + if "down_proj" in key or "o_proj" in key: + output_state_dict[change_name(key, i, layer_cnt)] = torch.zeros_like(value) + else: + output_state_dict[change_name(key, i, layer_cnt)] = torch.clone(value) + + print(f"Add layer {layer_cnt} expanded from layer {i}.") + layer_cnt += 1 + + for key, value in state_dict.items(): + if key not in output_state_dict: + output_state_dict[key] = value + + weights_name = SAFE_WEIGHTS_NAME if save_safetensors else WEIGHTS_NAME + filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors") + state_dict_split = split_torch_state_dict_into_shards( + output_state_dict, filename_pattern=filename_pattern, max_shard_size=shard_size + ) + for shard_file, tensors in tqdm(state_dict_split.filename_to_tensors.items(), desc="Save weights"): + shard = {tensor: output_state_dict[tensor].contiguous() for tensor in tensors} + if save_safetensors: + save_file(shard, os.path.join(output_dir, shard_file), metadata={"format": "pt"}) + else: + torch.save(shard, os.path.join(output_dir, shard_file)) + + if not state_dict_split.is_sharded: + print(f"Model weights saved in {os.path.join(output_dir, weights_name)}.") + else: + index = { + "metadata": state_dict_split.metadata, + "weight_map": state_dict_split.tensor_to_filename, + } + index_name = SAFE_WEIGHTS_INDEX_NAME if save_safetensors else WEIGHTS_INDEX_NAME + with open(os.path.join(output_dir, index_name), "w", encoding="utf-8") as f: + json.dump(index, f, indent=2, sort_keys=True) + + print(f"Model weights saved in {output_dir}.") + + print("- Fine-tune this model with:") + print(f"model_name_or_path: {output_dir}") + print("finetuning_type: freeze") + print(f"freeze_trainable_layers: {num_expand}") + print("use_llama_pro: true") + + +if __name__ == "__main__": + fire.Fire(block_expansion) diff --git a/post-training/LLaMA-Factory/scripts/loftq_init.py b/post-training/LLaMA-Factory/scripts/loftq_init.py new file mode 100644 index 0000000..3a79338 --- /dev/null +++ b/post-training/LLaMA-Factory/scripts/loftq_init.py @@ -0,0 +1,88 @@ +# Copyright 2025 HuggingFace Inc. and the LlamaFactory team. +# +# This code is based on the HuggingFace's PEFT library. +# https://github.com/huggingface/peft/blob/v0.10.0/examples/loftq_finetuning/quantize_save_load.py +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from typing import TYPE_CHECKING + +import fire +from peft import LoftQConfig, LoraConfig, TaskType, get_peft_model +from transformers import AutoModelForCausalLM, AutoTokenizer + + +if TYPE_CHECKING: + from transformers import PreTrainedModel + + +def quantize_loftq( + model_name_or_path: str, + output_dir: str, + loftq_bits: int = 4, + loftq_iter: int = 4, + lora_alpha: int = None, + lora_rank: int = 16, + lora_dropout: float = 0, + lora_target: tuple = ("q_proj", "v_proj"), + save_safetensors: bool = True, +): + r"""Initialize LoRA weights with LoRA-fine-tuning-aware Quantization (LoftQ). 
+ + Usage: python loftq_init.py --model_name_or_path path_to_model --output_dir output_dir + """ + if isinstance(lora_target, str): + lora_target = [name.strip() for name in lora_target.split(",")] + + tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True) + model = AutoModelForCausalLM.from_pretrained(model_name_or_path, trust_remote_code=True, torch_dtype="auto") + + loftq_config = LoftQConfig(loftq_bits=loftq_bits, loftq_iter=loftq_iter) + lora_config = LoraConfig( + task_type=TaskType.CAUSAL_LM, + inference_mode=True, + r=lora_rank, + lora_alpha=lora_alpha if lora_alpha is not None else lora_rank * 2, + lora_dropout=lora_dropout, + target_modules=lora_target, + init_lora_weights="loftq", + loftq_config=loftq_config, + ) + + # Init LoftQ model + print("Initializing LoftQ weights, it may be take several minutes, wait patiently.") + peft_model = get_peft_model(model, lora_config) + loftq_dir = os.path.join(output_dir, "loftq_init") + + # Save LoftQ model + setattr(peft_model.peft_config["default"], "base_model_name_or_path", os.path.abspath(output_dir)) + setattr(peft_model.peft_config["default"], "init_lora_weights", True) # don't apply loftq again + peft_model.save_pretrained(loftq_dir, safe_serialization=save_safetensors) + print(f"Adapter weights saved in {loftq_dir}") + + # Save base model + base_model: PreTrainedModel = peft_model.unload() + base_model.save_pretrained(output_dir, safe_serialization=save_safetensors) + tokenizer.save_pretrained(output_dir) + print(f"Model weights saved in {output_dir}") + + print("- Fine-tune this model with:") + print(f"model_name_or_path: {output_dir}") + print(f"adapter_name_or_path: {loftq_dir}") + print("finetuning_type: lora") + print(f"quantization_bit: {loftq_bits}") + + +if __name__ == "__main__": + fire.Fire(quantize_loftq) diff --git a/post-training/LLaMA-Factory/scripts/pissa_init.py b/post-training/LLaMA-Factory/scripts/pissa_init.py new file mode 100644 index 0000000..405a147 --- /dev/null +++ b/post-training/LLaMA-Factory/scripts/pissa_init.py @@ -0,0 +1,86 @@ +# Copyright 2025 HuggingFace Inc. and the LlamaFactory team. +# +# This code is based on the HuggingFace's PEFT library. +# https://github.com/huggingface/peft/blob/v0.11.0/examples/pissa_finetuning/preprocess.py +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from typing import TYPE_CHECKING + +import fire +from peft import LoraConfig, TaskType, get_peft_model +from transformers import AutoModelForCausalLM, AutoTokenizer + + +if TYPE_CHECKING: + from transformers import PreTrainedModel + + +def quantize_pissa( + model_name_or_path: str, + output_dir: str, + pissa_iter: int = 16, + lora_alpha: int = None, + lora_rank: int = 16, + lora_dropout: float = 0, + lora_target: tuple = ("q_proj", "v_proj"), + save_safetensors: bool = True, +): + r"""Initialize LoRA weights with Principal Singular values and Singular vectors Adaptation (PiSSA). 
+ + Usage: python pissa_init.py --model_name_or_path path_to_model --output_dir output_dir + """ + if isinstance(lora_target, str): + lora_target = [name.strip() for name in lora_target.split(",")] + + tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True) + model = AutoModelForCausalLM.from_pretrained(model_name_or_path, trust_remote_code=True, torch_dtype="auto") + + lora_config = LoraConfig( + task_type=TaskType.CAUSAL_LM, + r=lora_rank, + lora_alpha=lora_alpha if lora_alpha is not None else lora_rank * 2, + lora_dropout=lora_dropout, + target_modules=lora_target, + init_lora_weights="pissa" if pissa_iter == -1 else f"pissa_niter_{pissa_iter}", + ) + + # Init PiSSA model + peft_model = get_peft_model(model, lora_config) + pissa_dir = os.path.join(output_dir, "pissa_init") + + # Save PiSSA model + setattr(peft_model.peft_config["default"], "base_model_name_or_path", os.path.abspath(output_dir)) + setattr(peft_model.peft_config["default"], "init_lora_weights", True) # don't apply pissa again + peft_model.save_pretrained(pissa_dir, safe_serialization=save_safetensors) + print(f"Adapter weights saved in {pissa_dir}") + + # Save base model + base_model: PreTrainedModel = peft_model.unload() + base_model.save_pretrained(output_dir, safe_serialization=save_safetensors) + tokenizer.save_pretrained(output_dir) + print(f"Model weights saved in {output_dir}") + + print("- Fine-tune this model with:") + print(f"model_name_or_path: {output_dir}") + print(f"adapter_name_or_path: {pissa_dir}") + print("finetuning_type: lora") + print("pissa_init: false") + print("pissa_convert: true") + print("- and optionally with:") + print("quantization_bit: 4") + + +if __name__ == "__main__": + fire.Fire(quantize_pissa) diff --git a/post-training/LLaMA-Factory/scripts/qwen_omni_merge.py b/post-training/LLaMA-Factory/scripts/qwen_omni_merge.py new file mode 100644 index 0000000..449b17b --- /dev/null +++ b/post-training/LLaMA-Factory/scripts/qwen_omni_merge.py @@ -0,0 +1,118 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil + +import fire +from peft import PeftModel +from transformers import AutoModel, AutoProcessor, Qwen2_5OmniThinkerForConditionalGeneration # type: ignore + + +def merge_lora( + base_model_path: str, + lora_checkpoint_path: str, + extra_file: str = "spk_dict.pt", + submodule_name: str = "thinker", + save_path: str = "./merged_model_checkpoint", +): + """Load the original model, tokenizer, and processor configuration, merge the LoRA weights. + + For a specified submodule, and save the final merged model along with its configurations. + + Args: + base_model_path (str): Path to the original model directory. + lora_checkpoint_path (str): Path to the directory containing LoRA weights. + extra_file (str): Name of the extra file to be copied (default: "spk_dict.pt"). + submodule_name (str): Name of the submodule to merge (default: "thinker"). 
+ save_path (str): Directory where the merged model and configurations will be saved. + """ + # 1. Load the original model, tokenizer, and processor + model = AutoModel.from_pretrained(base_model_path, torch_dtype="auto", device_map="cpu") + processor = AutoProcessor.from_pretrained(base_model_path) + print("Successfully loaded the original model and tokenizer.") + + # 2. Extract the submodule to be merged (e.g., model.thinker) + if not hasattr(model, submodule_name): + raise AttributeError(f"The model does not have a submodule named '{submodule_name}'.") + + base_submodule = getattr(model, submodule_name) + print(f"Successfully extracted submodule: {submodule_name}.") + + # 3. Load the LoRA weights onto the extracted submodule + lora_model = PeftModel.from_pretrained(base_submodule, lora_checkpoint_path) + print("LoRA weights loaded successfully.") + + # 4. Merge the LoRA weights into the submodule and unload the LoRA modules + merged_submodule = lora_model.merge_and_unload() + print("LoRA weights merged successfully.") + + # 5. Replace the original submodule with the merged submodule in the model + setattr(model, submodule_name, merged_submodule) + + # 6. Save the final merged model along with the tokenizer and processor configuration + model.save_pretrained(save_path) + processor.save_pretrained(save_path) + print(f"Merged model and tokenizer saved to {save_path}.") + + source_file = os.path.join(base_model_path, extra_file) + target_file = os.path.join(save_path, extra_file) + if os.path.exists(source_file): + shutil.copy(source_file, target_file) + print(f"File '{extra_file}' copied from {base_model_path} to {save_path}.") + else: + print(f"File '{extra_file}' not found in {base_model_path}, skipping copy.") + + +def save_full_model( + saved_thinker_path: str, + base_model_path: str, + save_path: str = "./merged_model_checkpoint", + extra_file: str = "spk_dict.pt", +): + """Load the saved thinker module and the original model, replace the thinker in the original model. + + Then save the complete model along with its tokenizer and processor configuration. + + Args: + saved_thinker_path (str): Path to the saved thinker weights. + base_model_path (str): Directory path of the original model. + save_path (str): Directory where the merged model and configurations will be saved. + extra_file (str): Name of the extra file to be copied (default: "spk_dict.pt"). + """ + # 1. Load the saved thinker module and the original model + thinker = Qwen2_5OmniThinkerForConditionalGeneration.from_pretrained( + saved_thinker_path, torch_dtype="auto", device_map="cpu" + ) + base_model = AutoModel.from_pretrained(base_model_path, torch_dtype="auto", device_map="cpu") + base_model.thinker = thinker + + # 2. Save the complete model along with its tokenizer and processor configuration + processor = AutoProcessor.from_pretrained(base_model_path) + base_model.save_pretrained(save_path) + processor.save_pretrained(save_path) + print(f"Merged model and tokenizer saved to {save_path}.") + + # 3. 
Copy the extra file from the base model directory to the save_path + source_file = os.path.join(base_model_path, extra_file) + target_file = os.path.join(save_path, extra_file) + if os.path.exists(source_file): + shutil.copy(source_file, target_file) + print(f"File '{extra_file}' copied from {base_model_path} to {save_path}.") + else: + print(f"File '{extra_file}' not found in {base_model_path}, skipping copy.") + + +if __name__ == "__main__": + fire.Fire({"save_full": save_full_model, "merge_lora": merge_lora}) diff --git a/post-training/LLaMA-Factory/scripts/stat_utils/cal_flops.py b/post-training/LLaMA-Factory/scripts/stat_utils/cal_flops.py new file mode 100644 index 0000000..3dc0499 --- /dev/null +++ b/post-training/LLaMA-Factory/scripts/stat_utils/cal_flops.py @@ -0,0 +1,49 @@ +# Copyright 2025 Microsoft Corporation and the LlamaFactory team. +# +# This code is inspired by the Microsoft's DeepSpeed library. +# https://www.deepspeed.ai/tutorials/flops-profiler/ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import fire +import torch +from deepspeed.accelerator import get_accelerator # type: ignore +from deepspeed.profiling.flops_profiler import get_model_profile # type: ignore + +from llamafactory.chat import ChatModel + + +def calculate_flops( + model_name_or_path: str, + batch_size: int = 1, + seq_length: int = 512, + flash_attn: str = "auto", +): + r"""Calculate the flops of pre-trained models. + + Usage: python cal_flops.py --model_name_or_path path_to_model --batch_size 1 --seq_length 512 + """ + with get_accelerator().device(0): + chat_model = ChatModel(dict(model_name_or_path=model_name_or_path, template="empty", flash_attn=flash_attn)) + fake_input = torch.ones((batch_size, seq_length), dtype=torch.long, device=chat_model.engine.model.device) + input_dict = {"input_ids": fake_input, "labels": fake_input.clone()} + flops, macs, params = get_model_profile( + chat_model.engine.model, kwargs=input_dict, print_profile=True, detailed=True + ) + print("FLOPs:", flops) + print("MACs:", macs) + print("Params:", params) + + +if __name__ == "__main__": + fire.Fire(calculate_flops) diff --git a/post-training/LLaMA-Factory/scripts/stat_utils/cal_lr.py b/post-training/LLaMA-Factory/scripts/stat_utils/cal_lr.py new file mode 100644 index 0000000..eb35c47 --- /dev/null +++ b/post-training/LLaMA-Factory/scripts/stat_utils/cal_lr.py @@ -0,0 +1,98 @@ +# Copyright 2025 imoneoi and the LlamaFactory team. +# +# This code is inspired by the imoneoi's OpenChat library. +# https://github.com/imoneoi/openchat/blob/3.6.0/ochat/training_deepspeed/train.py +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import Literal + +import fire +import torch +from torch.utils.data import DataLoader +from tqdm import tqdm +from transformers import DataCollatorForLanguageModeling + +from llamafactory.data import MultiModalDataCollatorForSeq2Seq, get_dataset, get_template_and_fix_tokenizer +from llamafactory.extras.constants import IGNORE_INDEX +from llamafactory.hparams import get_train_args +from llamafactory.model import load_tokenizer + + +BASE_LR = 3e-4 # 1.5e-4 for 30B-70B models +BASE_BS = 4_000_000 # from llama paper + + +def calculate_lr( + model_name_or_path: str, + batch_size: int, # total batch size, namely (batch size * gradient accumulation * world size) + stage: Literal["pt", "sft"] = "sft", + dataset: str = "alpaca_en_demo", + dataset_dir: str = "data", + template: str = "default", + cutoff_len: int = 2048, # i.e. maximum input length during training + is_mistral_or_gemma: bool = False, # mistral and gemma models opt for a smaller learning rate, + packing: bool = False, +): + r"""Calculate the optimal learning rate for 7B/13B models using LLaMA's hyper-parameters. + + Usage: + python cal_lr.py --model_name_or_path path_to_model --dataset alpaca_en_demo --cutoff_len 1024 --batch_size 16 + """ + model_args, data_args, training_args, _, _ = get_train_args( + dict( + stage=stage, + model_name_or_path=model_name_or_path, + dataset=dataset, + dataset_dir=dataset_dir, + template=template, + cutoff_len=cutoff_len, + packing=packing, + preprocessing_num_workers=16, + output_dir="dummy_dir", + overwrite_cache=True, + do_train=True, + ) + ) + tokenizer_module = load_tokenizer(model_args) + tokenizer = tokenizer_module["tokenizer"] + template = get_template_and_fix_tokenizer(tokenizer, data_args) + trainset = get_dataset(template, model_args, data_args, training_args, stage, **tokenizer_module)["train_dataset"] + if stage == "pt": + data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False) + elif stage == "sft": + data_collator = MultiModalDataCollatorForSeq2Seq( + template=template, tokenizer=tokenizer, label_pad_token_id=IGNORE_INDEX + ) + else: + raise NotImplementedError(f"Stage does not supported: {stage}.") + + dataloader = DataLoader(trainset, batch_size, shuffle=False, collate_fn=data_collator, pin_memory=True) + valid_tokens, total_tokens = 0, 0 + for batch in tqdm(dataloader, desc="Collecting valid tokens"): + valid_tokens += torch.sum(batch["labels"] != IGNORE_INDEX).item() + total_tokens += torch.numel(batch["labels"]) + + valid_ratio = valid_tokens / total_tokens + token_batch_size = cutoff_len * batch_size * valid_ratio + lr = BASE_LR * math.sqrt(token_batch_size / BASE_BS) # lr ~ sqrt(batch_size) + lr = lr / 6.0 if is_mistral_or_gemma else lr + print( + f"Optimal learning rate is {lr:.2e} for valid ratio% {valid_ratio * 100:.2f} " + f"and effective token batch size {token_batch_size:.2f}" + ) + + +if __name__ == "__main__": + fire.Fire(calculate_lr) diff --git a/post-training/LLaMA-Factory/scripts/stat_utils/cal_mfu.py b/post-training/LLaMA-Factory/scripts/stat_utils/cal_mfu.py new file mode 100644 index 0000000..f1d4446 --- /dev/null +++ b/post-training/LLaMA-Factory/scripts/stat_utils/cal_mfu.py @@ -0,0 +1,161 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os + +import fire +import torch +import torch.distributed as dist +from transformers import AutoConfig + +from llamafactory.train.tuner import run_exp + + +BASE = 2 # gemm (add + mul) + + +def compute_model_flops( + model_name_or_path: str, + total_batch_size: int, + seq_length: int, + include_backward: bool = True, + include_recompute: bool = False, + include_flashattn: bool = False, +) -> int: + r"""Calculate the FLOPs of model per forward/backward pass.""" + config = AutoConfig.from_pretrained(model_name_or_path) + hidden_size = getattr(config, "hidden_size", None) + vocab_size = getattr(config, "vocab_size", None) + intermediate_size = getattr(config, "intermediate_size", None) + num_attention_heads = getattr(config, "num_attention_heads", None) + num_key_value_heads = getattr(config, "num_key_value_heads", None) + num_hidden_layers = getattr(config, "num_hidden_layers", None) + tie_word_embeddings = getattr(config, "tie_word_embeddings", False) + + # mlp module + mlp_flops_per_token = 3 * BASE * hidden_size * intermediate_size # up, gate, down + mlp_flops = total_batch_size * seq_length * num_hidden_layers * mlp_flops_per_token + + # attn projector module + q_flops_per_token = BASE * hidden_size * hidden_size + o_flops_per_token = BASE * hidden_size * hidden_size + k_flops_per_token = BASE * hidden_size * hidden_size * num_key_value_heads // num_attention_heads + v_flops_per_token = BASE * hidden_size * hidden_size * num_key_value_heads // num_attention_heads + attn_proj_flops_per_token = q_flops_per_token + o_flops_per_token + k_flops_per_token + v_flops_per_token + attn_proj_flops = total_batch_size * seq_length * num_hidden_layers * attn_proj_flops_per_token + + # attn sdpa module + sdpa_flops_per_layer = 2 * BASE * hidden_size * seq_length * seq_length # (q * k^T) * v + sdpa_flops = total_batch_size * num_hidden_layers * sdpa_flops_per_layer + + # embedding module + embedding_flops_per_token = hidden_size * vocab_size + embedding_flops = total_batch_size * seq_length * embedding_flops_per_token + if tie_word_embeddings is False: + embedding_flops *= 2 + + non_embedding_flops = mlp_flops + attn_proj_flops + sdpa_flops + non_embedding_coeff, embedding_coeff = 1, 1 + if include_backward: + non_embedding_coeff += 2 + embedding_coeff += 2 + + if include_recompute: + non_embedding_coeff += 1 + + total_flops = non_embedding_coeff * non_embedding_flops + embedding_coeff * embedding_flops + + if include_flashattn: + total_flops += sdpa_flops + + return total_flops + + +def compute_device_flops(world_size: int) -> float: + r"""Calculate the FLOPs of the device capability per second.""" + device_name = torch.cuda.get_device_name() + if "H100" in device_name or "H800" in device_name: + return 989 * 1e12 * world_size + elif "A100" in device_name or "A800" in device_name: + return 312 * 1e12 * world_size + elif "V100" in device_name: + return 125 * 1e12 * world_size + elif "4090" in device_name: + return 98 * 1e12 * world_size + else: + raise NotImplementedError(f"Device not supported: {device_name}.") + + +def calculate_mfu( + model_name_or_path: str, + 
batch_size: int = 1, + seq_length: int = 1024, + num_steps: int = 100, + finetuning_type: str = "lora", + flash_attn: str = "auto", + deepspeed_stage: int = 0, + disable_gc: bool = False, + liger_kernel: bool = False, + unsloth_gc: bool = False, +) -> float: + r"""Calculate MFU for given model and hyper-params. + + Usage: python cal_mfu.py --model_name_or_path path_to_model --batch_size 1 --seq_length 1024 + """ + args = { + "model_name_or_path": model_name_or_path, + "flash_attn": flash_attn, + "disable_gradient_checkpointing": disable_gc, + "enable_liger_kernel": liger_kernel, + "use_unsloth_gc": unsloth_gc, + "stage": "pt", + "do_train": True, + "finetuning_type": finetuning_type, + "dataset": "c4_demo", + "cutoff_len": seq_length, + "output_dir": os.path.join("saves", "test_mfu"), + "logging_strategy": "no", + "save_strategy": "no", + "save_only_model": True, + "overwrite_output_dir": True, + "per_device_train_batch_size": batch_size, + "max_steps": num_steps, + "bf16": True, + } + if deepspeed_stage in [2, 3]: + args["deepspeed"] = f"examples/deepspeed/ds_z{deepspeed_stage}_config.json" + + run_exp(args) + if dist.is_initialized(): + dist.barrier() + world_size = dist.get_world_size() + else: + world_size = 1 + + if int(os.getenv("LOCAL_RANK", "0")) == 0: + with open(os.path.join("saves", "test_mfu", "all_results.json"), encoding="utf-8") as f: + result = json.load(f) + + total_batch_size = batch_size * world_size + mfu_value = ( + result["train_steps_per_second"] + * compute_model_flops(model_name_or_path, total_batch_size, seq_length) + / compute_device_flops(world_size) + ) + print(f"MFU: {mfu_value * 100:.2f}%") + + +if __name__ == "__main__": + fire.Fire(calculate_mfu) diff --git a/post-training/LLaMA-Factory/scripts/stat_utils/cal_ppl.py b/post-training/LLaMA-Factory/scripts/stat_utils/cal_ppl.py new file mode 100644 index 0000000..8d47ffd --- /dev/null +++ b/post-training/LLaMA-Factory/scripts/stat_utils/cal_ppl.py @@ -0,0 +1,134 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
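+
+# Computes per-sample perplexity of a model on a LLaMA-Factory dataset. Batches are built
+# with the same collators used for training (stages "pt", "sft" or "rm"; for "rm" only the
+# chosen responses are scored). Label positions equal to IGNORE_INDEX are masked out, so
+# with the default "sft" stage only response tokens contribute to the loss, and each
+# sample's perplexity is exp of its mean token negative log-likelihood over unmasked
+# positions. Per-sample perplexities are written to `save_name` as JSON and the average
+# is printed at the end.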
+ +import json +from dataclasses import dataclass +from typing import Any, Literal, Optional + +import fire +import torch +from torch.utils.data import DataLoader +from tqdm import tqdm +from transformers import DataCollatorForLanguageModeling + +from llamafactory.data import MultiModalDataCollatorForSeq2Seq, get_dataset, get_template_and_fix_tokenizer +from llamafactory.extras.constants import IGNORE_INDEX +from llamafactory.hparams import get_train_args +from llamafactory.model import load_model, load_tokenizer + + +@dataclass +class PairwiseDataCollatorWithPadding(MultiModalDataCollatorForSeq2Seq): + r"""Data collator for pairwise data.""" + + train_on_prompt: bool = False + + def __call__(self, features: list[dict[str, Any]]) -> dict[str, torch.Tensor]: + r"""Pad batched data to the longest sequence in the batch.""" + chosen_features = [] + for feature in features: + chosen_features.append( + { + "input_ids": feature["chosen_input_ids"], + "attention_mask": feature["chosen_attention_mask"], + "labels": feature["chosen_input_ids"] if self.train_on_prompt else feature["chosen_labels"], + "images": feature["images"], + "videos": feature["videos"], + "audios": feature["audios"], + } + ) + + return super().__call__(chosen_features) + + +def calculate_ppl( + model_name_or_path: str, + save_name: str = "ppl.json", + batch_size: int = 4, + stage: Literal["pt", "sft", "rm"] = "sft", + dataset: str = "alpaca_en_demo", + dataset_dir: str = "data", + template: str = "default", + cutoff_len: int = 2048, + max_samples: Optional[int] = None, + train_on_prompt: bool = False, +): + r"""Calculate the ppl on the dataset of the pre-trained models. + + Usage: export CUDA_VISIBLE_DEVICES=0 + python cal_ppl.py --model_name_or_path path_to_model --dataset alpaca_en_demo --save_name ppl.json + """ + model_args, data_args, training_args, finetuning_args, _ = get_train_args( + dict( + stage=stage, + model_name_or_path=model_name_or_path, + dataset=dataset, + dataset_dir=dataset_dir, + template=template, + cutoff_len=cutoff_len, + max_samples=max_samples, + train_on_prompt=train_on_prompt, + preprocessing_num_workers=16, + output_dir="dummy_dir", + overwrite_cache=True, + do_train=True, + ) + ) + tokenizer_module = load_tokenizer(model_args) + tokenizer = tokenizer_module["tokenizer"] + template = get_template_and_fix_tokenizer(tokenizer, data_args) + trainset = get_dataset(template, model_args, data_args, training_args, stage, **tokenizer_module)["train_dataset"] + model = load_model(tokenizer, model_args, finetuning_args, is_trainable=False) + if stage == "pt": + data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False) + elif stage == "sft": + data_collator = MultiModalDataCollatorForSeq2Seq( + template=template, tokenizer=tokenizer, label_pad_token_id=IGNORE_INDEX + ) + elif stage == "rm": + data_collator = PairwiseDataCollatorWithPadding( + template=template, tokenizer=tokenizer, label_pad_token_id=IGNORE_INDEX, train_on_prompt=train_on_prompt + ) + else: + raise NotImplementedError(f"Stage does not supported: {stage}.") + + dataloader = DataLoader(trainset, batch_size, shuffle=False, collate_fn=data_collator, pin_memory=True) + criterion = torch.nn.CrossEntropyLoss(reduction="none") + total_ppl = 0 + perplexities = [] + batch: dict[str, torch.Tensor] + with torch.no_grad(): + for batch in tqdm(dataloader, desc="Computing perplexities"): + batch = batch.to(model.device) + outputs = model(**batch) + shift_logits: torch.Tensor = outputs["logits"][..., :-1, :] + shift_labels: torch.Tensor 
= batch["labels"][..., 1:] + loss_mask = shift_labels != IGNORE_INDEX + flatten_logits = shift_logits.contiguous().view(shift_labels.size(0) * shift_labels.size(1), -1) + flatten_labels = shift_labels.contiguous().view(-1) + token_logps: torch.Tensor = criterion(flatten_logits, flatten_labels) + token_logps = token_logps.contiguous().view(shift_logits.size(0), -1) + sentence_logps = (token_logps * loss_mask).sum(-1) / loss_mask.sum(-1) + total_ppl += sentence_logps.exp().sum().item() + perplexities.extend(sentence_logps.exp().tolist()) + + with open(save_name, "w", encoding="utf-8") as f: + json.dump(perplexities, f, indent=2) + + print(f"Average perplexity is {total_ppl / len(perplexities):.2f}") + print(f"Perplexities have been saved at {save_name}.") + + +if __name__ == "__main__": + fire.Fire(calculate_ppl) diff --git a/post-training/LLaMA-Factory/scripts/stat_utils/length_cdf.py b/post-training/LLaMA-Factory/scripts/stat_utils/length_cdf.py new file mode 100644 index 0000000..c459c8f --- /dev/null +++ b/post-training/LLaMA-Factory/scripts/stat_utils/length_cdf.py @@ -0,0 +1,69 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import defaultdict + +import fire +from tqdm import tqdm + +from llamafactory.data import get_dataset, get_template_and_fix_tokenizer +from llamafactory.hparams import get_train_args +from llamafactory.model import load_tokenizer + + +def length_cdf( + model_name_or_path: str, + dataset: str = "alpaca_en_demo", + dataset_dir: str = "data", + template: str = "default", + interval: int = 1000, +): + r"""Calculate the distribution of the input lengths in the dataset. 
+ + Usage: export CUDA_VISIBLE_DEVICES=0 + python length_cdf.py --model_name_or_path path_to_model --dataset alpaca_en_demo --template default + """ + model_args, data_args, training_args, _, _ = get_train_args( + dict( + stage="sft", + model_name_or_path=model_name_or_path, + dataset=dataset, + dataset_dir=dataset_dir, + template=template, + cutoff_len=1_000_000, + preprocessing_num_workers=16, + output_dir="dummy_dir", + overwrite_cache=True, + do_train=True, + ) + ) + tokenizer_module = load_tokenizer(model_args) + template = get_template_and_fix_tokenizer(tokenizer_module["tokenizer"], data_args) + trainset = get_dataset(template, model_args, data_args, training_args, "sft", **tokenizer_module)["train_dataset"] + total_num = len(trainset) + length_dict = defaultdict(int) + for sample in tqdm(trainset["input_ids"], desc="Collecting lengths"): + length_dict[len(sample) // interval * interval] += 1 + + length_tuples = list(length_dict.items()) + length_tuples.sort() + count_accu, prob_accu = 0, 0 + for length, count in length_tuples: + count_accu += count + prob_accu += count / total_num * 100 + print(f"{count_accu:d} ({prob_accu:.2f}%) samples have length < {length + interval}.") + + +if __name__ == "__main__": + fire.Fire(length_cdf) diff --git a/post-training/LLaMA-Factory/scripts/vllm_infer.py b/post-training/LLaMA-Factory/scripts/vllm_infer.py new file mode 100644 index 0000000..53391ee --- /dev/null +++ b/post-training/LLaMA-Factory/scripts/vllm_infer.py @@ -0,0 +1,162 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +from typing import Optional + +import fire +from transformers import Seq2SeqTrainingArguments + +from llamafactory.data import get_dataset, get_template_and_fix_tokenizer +from llamafactory.extras.constants import IGNORE_INDEX +from llamafactory.extras.misc import get_device_count +from llamafactory.extras.packages import is_vllm_available +from llamafactory.hparams import get_infer_args +from llamafactory.model import load_tokenizer + + +if is_vllm_available(): + from vllm import LLM, SamplingParams + from vllm.lora.request import LoRARequest + + +def vllm_infer( + model_name_or_path: str, + adapter_name_or_path: str = None, + dataset: str = "alpaca_en_demo", + dataset_dir: str = "data", + template: str = "default", + cutoff_len: int = 2048, + max_samples: Optional[int] = None, + vllm_config: str = "{}", + save_name: str = "generated_predictions.jsonl", + temperature: float = 0.95, + top_p: float = 0.7, + top_k: int = 50, + max_new_tokens: int = 1024, + repetition_penalty: float = 1.0, + skip_special_tokens: bool = True, + seed: Optional[int] = None, + pipeline_parallel_size: int = 1, + image_max_pixels: int = 768 * 768, + image_min_pixels: int = 32 * 32, +): + r"""Perform batch generation using vLLM engine, which supports tensor parallelism. 
+ + Usage: python vllm_infer.py --model_name_or_path meta-llama/Llama-2-7b-hf --template llama --dataset alpaca_en_demo + """ + if pipeline_parallel_size > get_device_count(): + raise ValueError("Pipeline parallel size should be smaller than the number of gpus.") + + model_args, data_args, _, generating_args = get_infer_args( + dict( + model_name_or_path=model_name_or_path, + adapter_name_or_path=adapter_name_or_path, + dataset=dataset, + dataset_dir=dataset_dir, + template=template, + cutoff_len=cutoff_len, + max_samples=max_samples, + preprocessing_num_workers=16, + vllm_config=vllm_config, + temperature=temperature, + top_p=top_p, + top_k=top_k, + max_new_tokens=max_new_tokens, + repetition_penalty=repetition_penalty, + ) + ) + + training_args = Seq2SeqTrainingArguments(output_dir="dummy_dir") + tokenizer_module = load_tokenizer(model_args) + tokenizer = tokenizer_module["tokenizer"] + template_obj = get_template_and_fix_tokenizer(tokenizer, data_args) + template_obj.mm_plugin.expand_mm_tokens = False # for vllm generate + dataset_module = get_dataset(template_obj, model_args, data_args, training_args, "ppo", **tokenizer_module) + + inputs, prompts, labels = [], [], [] + for sample in dataset_module["train_dataset"]: + if sample["images"]: + multi_modal_data = { + "image": template_obj.mm_plugin._regularize_images( + sample["images"], image_max_pixels=image_max_pixels, image_min_pixels=image_min_pixels + )["images"] + } + elif sample["videos"]: + multi_modal_data = { + "video": template_obj.mm_plugin._regularize_videos( + sample["videos"], image_max_pixels=image_max_pixels, image_min_pixels=image_min_pixels + )["videos"] + } + elif sample["audios"]: + audio_data = template_obj.mm_plugin._regularize_audios( + sample["audios"], + sampling_rate=16000, + ) + multi_modal_data = {"audio": zip(audio_data["audios"], audio_data["sampling_rates"])} + else: + multi_modal_data = None + + inputs.append({"prompt_token_ids": sample["input_ids"], "multi_modal_data": multi_modal_data}) + prompts.append(tokenizer.decode(sample["input_ids"], skip_special_tokens=skip_special_tokens)) + labels.append( + tokenizer.decode( + list(filter(lambda x: x != IGNORE_INDEX, sample["labels"])), skip_special_tokens=skip_special_tokens + ) + ) + + sampling_params = SamplingParams( + repetition_penalty=generating_args.repetition_penalty or 1.0, # repetition_penalty must > 0 + temperature=generating_args.temperature, + top_p=generating_args.top_p or 1.0, # top_p must > 0 + top_k=generating_args.top_k or -1, # top_k must > 0 + stop_token_ids=template_obj.get_stop_token_ids(tokenizer), + max_tokens=generating_args.max_new_tokens, + skip_special_tokens=skip_special_tokens, + seed=seed, + ) + if model_args.adapter_name_or_path is not None: + lora_request = LoRARequest("default", 1, model_args.adapter_name_or_path[0]) + else: + lora_request = None + + engine_args = { + "model": model_args.model_name_or_path, + "trust_remote_code": True, + "dtype": model_args.infer_dtype, + "max_model_len": cutoff_len + max_new_tokens, + "tensor_parallel_size": (get_device_count() // pipeline_parallel_size) or 1, + "pipeline_parallel_size": pipeline_parallel_size, + "disable_log_stats": True, + "enable_lora": model_args.adapter_name_or_path is not None, + } + if template_obj.mm_plugin.__class__.__name__ != "BasePlugin": + engine_args["limit_mm_per_prompt"] = {"image": 4, "video": 2, "audio": 2} + + if isinstance(model_args.vllm_config, dict): + engine_args.update(model_args.vllm_config) + + results = LLM(**engine_args).generate(inputs, 
sampling_params, lora_request=lora_request) + preds = [result.outputs[0].text for result in results] + with open(save_name, "w", encoding="utf-8") as f: + for text, pred, label in zip(prompts, preds, labels): + f.write(json.dumps({"prompt": text, "predict": pred, "label": label}, ensure_ascii=False) + "\n") + + print("*" * 70) + print(f"{len(prompts)} generated results have been saved at {save_name}.") + print("*" * 70) + + +if __name__ == "__main__": + fire.Fire(vllm_infer) diff --git a/post-training/LLaMA-Factory/setup.py b/post-training/LLaMA-Factory/setup.py new file mode 100644 index 0000000..e00edb3 --- /dev/null +++ b/post-training/LLaMA-Factory/setup.py @@ -0,0 +1,117 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re + +from setuptools import find_packages, setup + + +def get_version() -> str: + with open(os.path.join("src", "llamafactory", "extras", "env.py"), encoding="utf-8") as f: + file_content = f.read() + pattern = r"{}\W*=\W*\"([^\"]+)\"".format("VERSION") + (version,) = re.findall(pattern, file_content) + return version + + +def get_requires() -> list[str]: + with open("requirements.txt", encoding="utf-8") as f: + file_content = f.read() + lines = [line.strip() for line in file_content.strip().split("\n") if not line.startswith("#")] + return lines + + +def get_console_scripts() -> list[str]: + console_scripts = ["llamafactory-cli = llamafactory.cli:main"] + if os.getenv("ENABLE_SHORT_CONSOLE", "1").lower() in ["true", "y", "1"]: + console_scripts.append("lmf = llamafactory.cli:main") + + return console_scripts + + +extra_require = { + "torch": ["torch>=1.13.1"], + "torch-npu": ["torch==2.4.0", "torch-npu==2.4.0.post2", "decorator"], + "metrics": ["nltk", "jieba", "rouge-chinese"], + "deepspeed": ["deepspeed>=0.10.0,<=0.16.5"], + "liger-kernel": ["liger-kernel>=0.5.5"], + "bitsandbytes": ["bitsandbytes>=0.39.0"], + "hqq": ["hqq"], + "eetq": ["eetq"], + "gptq": ["optimum>=1.17.0", "auto-gptq>=0.5.0"], + "awq": ["autoawq"], + "aqlm": ["aqlm[gpu]>=1.1.0"], + "vllm": ["vllm>=0.4.3,<=0.8.4"], + "sglang": ["sglang[srt]>=0.4.5", "transformers==4.51.1"], + "galore": ["galore-torch"], + "apollo": ["apollo-torch"], + "badam": ["badam>=1.2.1"], + "adam-mini": ["adam-mini"], + "qwen": ["transformers_stream_generator"], + "minicpm_v": [ + "soundfile", + "torchvision", + "torchaudio", + "vector_quantize_pytorch", + "vocos", + "msgpack", + "referencing", + "jsonschema_specifications", + "transformers==4.48.3", + ], + "modelscope": ["modelscope"], + "openmind": ["openmind"], + "swanlab": ["swanlab"], + "dev": ["pre-commit", "ruff", "pytest", "build"], +} + + +def main(): + setup( + name="llamafactory", + version=get_version(), + author="hiyouga", + author_email="hiyouga@buaa.edu.cn", + description="Unified Efficient Fine-Tuning of 100+ LLMs", + long_description=open("README.md", encoding="utf-8").read(), + long_description_content_type="text/markdown", + keywords=["AI", "LLM", "GPT", "ChatGPT", "Llama", "Transformer", 
"DeepSeek", "Pytorch"], + license="Apache 2.0 License", + url="https://github.com/hiyouga/LLaMA-Factory", + package_dir={"": "src"}, + packages=find_packages("src"), + python_requires=">=3.9.0", + install_requires=get_requires(), + extras_require=extra_require, + entry_points={"console_scripts": get_console_scripts()}, + classifiers=[ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Intended Audience :: Education", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + ], + ) + + +if __name__ == "__main__": + main() diff --git a/post-training/LLaMA-Factory/src/api.py b/post-training/LLaMA-Factory/src/api.py new file mode 100644 index 0000000..6121545 --- /dev/null +++ b/post-training/LLaMA-Factory/src/api.py @@ -0,0 +1,33 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import uvicorn + +from llamafactory.api.app import create_app +from llamafactory.chat import ChatModel + + +def main(): + chat_model = ChatModel() + app = create_app(chat_model) + api_host = os.getenv("API_HOST", "0.0.0.0") + api_port = int(os.getenv("API_PORT", "8000")) + print(f"Visit http://localhost:{api_port}/docs for API document.") + uvicorn.run(app, host=api_host, port=api_port) + + +if __name__ == "__main__": + main() diff --git a/post-training/LLaMA-Factory/src/llamafactory/__init__.py b/post-training/LLaMA-Factory/src/llamafactory/__init__.py new file mode 100644 index 0000000..b1567ef --- /dev/null +++ b/post-training/LLaMA-Factory/src/llamafactory/__init__.py @@ -0,0 +1,31 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Efficient fine-tuning of large language models. 
+ +Level: + api, webui > chat, eval, train > data, model > hparams > extras + +Disable version checking: DISABLE_VERSION_CHECK=1 +Enable VRAM recording: RECORD_VRAM=1 +Force using torchrun: FORCE_TORCHRUN=1 +Set logging verbosity: LLAMAFACTORY_VERBOSITY=WARN +Use modelscope: USE_MODELSCOPE_HUB=1 +Use openmind: USE_OPENMIND_HUB=1 +""" + +from .extras.env import VERSION + + +__version__ = VERSION diff --git a/post-training/LLaMA-Factory/src/llamafactory/api/__init__.py b/post-training/LLaMA-Factory/src/llamafactory/api/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/post-training/LLaMA-Factory/src/llamafactory/api/app.py b/post-training/LLaMA-Factory/src/llamafactory/api/app.py new file mode 100644 index 0000000..e0621d8 --- /dev/null +++ b/post-training/LLaMA-Factory/src/llamafactory/api/app.py @@ -0,0 +1,133 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +import os +from contextlib import asynccontextmanager +from functools import partial +from typing import Annotated, Optional + +from ..chat import ChatModel +from ..extras.constants import EngineName +from ..extras.misc import torch_gc +from ..extras.packages import is_fastapi_available, is_starlette_available, is_uvicorn_available +from .chat import ( + create_chat_completion_response, + create_score_evaluation_response, + create_stream_chat_completion_response, +) +from .protocol import ( + ChatCompletionRequest, + ChatCompletionResponse, + ModelCard, + ModelList, + ScoreEvaluationRequest, + ScoreEvaluationResponse, +) + + +if is_fastapi_available(): + from fastapi import Depends, FastAPI, HTTPException, status + from fastapi.middleware.cors import CORSMiddleware + from fastapi.security.http import HTTPAuthorizationCredentials, HTTPBearer + + +if is_starlette_available(): + from sse_starlette import EventSourceResponse + + +if is_uvicorn_available(): + import uvicorn + + +async def sweeper() -> None: + while True: + torch_gc() + await asyncio.sleep(300) + + +@asynccontextmanager +async def lifespan(app: "FastAPI", chat_model: "ChatModel"): # collects GPU memory + if chat_model.engine.name == EngineName.HF: + asyncio.create_task(sweeper()) + + yield + torch_gc() + + +def create_app(chat_model: "ChatModel") -> "FastAPI": + root_path = os.getenv("FASTAPI_ROOT_PATH", "") + app = FastAPI(lifespan=partial(lifespan, chat_model=chat_model), root_path=root_path) + app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], + ) + api_key = os.getenv("API_KEY") + security = HTTPBearer(auto_error=False) + + async def verify_api_key(auth: Annotated[Optional[HTTPAuthorizationCredentials], Depends(security)]): + if api_key and (auth is None or auth.credentials != api_key): + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid API key.") + + @app.get( + "/v1/models", + response_model=ModelList, + status_code=status.HTTP_200_OK, + dependencies=[Depends(verify_api_key)], + ) + 
async def list_models(): + model_card = ModelCard(id=os.getenv("API_MODEL_NAME", "gpt-3.5-turbo")) + return ModelList(data=[model_card]) + + @app.post( + "/v1/chat/completions", + response_model=ChatCompletionResponse, + status_code=status.HTTP_200_OK, + dependencies=[Depends(verify_api_key)], + ) + async def create_chat_completion(request: ChatCompletionRequest): + if not chat_model.engine.can_generate: + raise HTTPException(status_code=status.HTTP_405_METHOD_NOT_ALLOWED, detail="Not allowed") + + if request.stream: + generate = create_stream_chat_completion_response(request, chat_model) + return EventSourceResponse(generate, media_type="text/event-stream", sep="\n") + else: + return await create_chat_completion_response(request, chat_model) + + @app.post( + "/v1/score/evaluation", + response_model=ScoreEvaluationResponse, + status_code=status.HTTP_200_OK, + dependencies=[Depends(verify_api_key)], + ) + async def create_score_evaluation(request: ScoreEvaluationRequest): + if chat_model.engine.can_generate: + raise HTTPException(status_code=status.HTTP_405_METHOD_NOT_ALLOWED, detail="Not allowed") + + return await create_score_evaluation_response(request, chat_model) + + return app + + +def run_api() -> None: + chat_model = ChatModel() + app = create_app(chat_model) + api_host = os.getenv("API_HOST", "0.0.0.0") + api_port = int(os.getenv("API_PORT", "8000")) + print(f"Visit http://localhost:{api_port}/docs for API document.") + uvicorn.run(app, host=api_host, port=api_port) diff --git a/post-training/LLaMA-Factory/src/llamafactory/api/chat.py b/post-training/LLaMA-Factory/src/llamafactory/api/chat.py new file mode 100644 index 0000000..c97197d --- /dev/null +++ b/post-training/LLaMA-Factory/src/llamafactory/api/chat.py @@ -0,0 +1,283 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
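+
+# Request handling for the OpenAI-compatible API. `_process_request` maps OpenAI-style
+# chat messages onto LLaMA-Factory roles and collects multimodal inputs, accepting
+# image/video/audio items given as base64 data URIs, local file paths or remote URLs.
+# The create_*_response helpers below wrap `ChatModel.achat`, `astream_chat` and
+# `aget_scores` into ChatCompletion and ScoreEvaluation responses.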
+ +import base64 +import io +import json +import os +import re +import uuid +from collections.abc import AsyncGenerator +from typing import TYPE_CHECKING, Optional + +from ..data import Role as DataRole +from ..extras import logging +from ..extras.constants import AUDIO_PLACEHOLDER, IMAGE_PLACEHOLDER, VIDEO_PLACEHOLDER +from ..extras.misc import is_env_enabled +from ..extras.packages import is_fastapi_available, is_pillow_available, is_requests_available +from .common import dictify, jsonify +from .protocol import ( + ChatCompletionMessage, + ChatCompletionResponse, + ChatCompletionResponseChoice, + ChatCompletionResponseUsage, + ChatCompletionStreamResponse, + ChatCompletionStreamResponseChoice, + Finish, + Function, + FunctionCall, + Role, + ScoreEvaluationResponse, +) + + +if is_fastapi_available(): + from fastapi import HTTPException, status + + +if is_pillow_available(): + from PIL import Image + + +if is_requests_available(): + import requests + + +if TYPE_CHECKING: + from ..chat import ChatModel + from ..data.mm_plugin import AudioInput, ImageInput, VideoInput + from .protocol import ChatCompletionRequest, ScoreEvaluationRequest + + +logger = logging.get_logger(__name__) +ROLE_MAPPING = { + Role.USER: DataRole.USER.value, + Role.ASSISTANT: DataRole.ASSISTANT.value, + Role.SYSTEM: DataRole.SYSTEM.value, + Role.FUNCTION: DataRole.FUNCTION.value, + Role.TOOL: DataRole.OBSERVATION.value, +} + + +def _process_request( + request: "ChatCompletionRequest", +) -> tuple[ + list[dict[str, str]], + Optional[str], + Optional[str], + Optional[list["ImageInput"]], + Optional[list["VideoInput"]], + Optional[list["AudioInput"]], +]: + if is_env_enabled("API_VERBOSE", "1"): + logger.info_rank0(f"==== request ====\n{json.dumps(dictify(request), indent=2, ensure_ascii=False)}") + + if len(request.messages) == 0: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid length") + + if request.messages[0].role == Role.SYSTEM: + content = request.messages.pop(0).content + system = content[0].text if isinstance(content, list) else content + else: + system = None + + if len(request.messages) % 2 == 0: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Only supports u/a/u/a/u...") + + input_messages = [] + images, videos, audios = [], [], [] + for i, message in enumerate(request.messages): + if i % 2 == 0 and message.role not in [Role.USER, Role.TOOL]: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid role") + elif i % 2 == 1 and message.role not in [Role.ASSISTANT, Role.FUNCTION]: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid role") + + if message.role == Role.ASSISTANT and isinstance(message.tool_calls, list) and len(message.tool_calls): + tool_calls = [ + {"name": tool_call.function.name, "arguments": tool_call.function.arguments} + for tool_call in message.tool_calls + ] + content = json.dumps(tool_calls, ensure_ascii=False) + input_messages.append({"role": ROLE_MAPPING[Role.FUNCTION], "content": content}) + elif isinstance(message.content, list): + text_content = "" + for input_item in message.content: + if input_item.type == "text": + text_content += input_item.text + elif input_item.type == "image_url": + text_content += IMAGE_PLACEHOLDER + image_url = input_item.image_url.url + if re.match(r"^data:image\/(png|jpg|jpeg|gif|bmp);base64,(.+)$", image_url): # base64 image + image_stream = io.BytesIO(base64.b64decode(image_url.split(",", maxsplit=1)[1])) + elif os.path.isfile(image_url): # local file + 
image_stream = open(image_url, "rb") + else: # web uri + image_stream = requests.get(image_url, stream=True).raw + + images.append(Image.open(image_stream).convert("RGB")) + elif input_item.type == "video_url": + text_content += VIDEO_PLACEHOLDER + video_url = input_item.video_url.url + if re.match(r"^data:video\/(mp4|mkv|avi|mov);base64,(.+)$", video_url): # base64 video + video_stream = io.BytesIO(base64.b64decode(video_url.split(",", maxsplit=1)[1])) + elif os.path.isfile(video_url): # local file + video_stream = open(video_url, "rb") + else: # web uri + video_stream = requests.get(video_url, stream=True).raw + + videos.append(video_stream) + elif input_item.type == "audio_url": + text_content += AUDIO_PLACEHOLDER + audio_url = input_item.audio_url.url + if re.match(r"^data:audio\/(mpeg|mp3|wav|ogg);base64,(.+)$", audio_url): # base64 audio + audio_stream = io.BytesIO(base64.b64decode(audio_url.split(",", maxsplit=1)[1])) + elif os.path.isfile(audio_url): # local file + audio_stream = open(audio_url, "rb") + else: # web uri + audio_stream = requests.get(audio_url, stream=True).raw + + audios.append(audio_stream) + else: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, detail=f"Invalid input type {input_item.type}." + ) + + input_messages.append({"role": ROLE_MAPPING[message.role], "content": text_content}) + else: + input_messages.append({"role": ROLE_MAPPING[message.role], "content": message.content}) + + tool_list = request.tools + if isinstance(tool_list, list) and len(tool_list): + try: + tools = json.dumps([dictify(tool.function) for tool in tool_list], ensure_ascii=False) + except json.JSONDecodeError: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid tools") + else: + tools = None + + return input_messages, system, tools, images or None, videos or None, audios or None + + +def _create_stream_chat_completion_chunk( + completion_id: str, + model: str, + delta: "ChatCompletionMessage", + index: Optional[int] = 0, + finish_reason: Optional["Finish"] = None, +) -> str: + choice_data = ChatCompletionStreamResponseChoice(index=index, delta=delta, finish_reason=finish_reason) + chunk = ChatCompletionStreamResponse(id=completion_id, model=model, choices=[choice_data]) + return jsonify(chunk) + + +async def create_chat_completion_response( + request: "ChatCompletionRequest", chat_model: "ChatModel" +) -> "ChatCompletionResponse": + completion_id = f"chatcmpl-{uuid.uuid4().hex}" + input_messages, system, tools, images, videos, audios = _process_request(request) + responses = await chat_model.achat( + input_messages, + system, + tools, + images, + videos, + audios, + do_sample=request.do_sample, + temperature=request.temperature, + top_p=request.top_p, + max_new_tokens=request.max_tokens, + num_return_sequences=request.n, + stop=request.stop, + ) + + prompt_length, response_length = 0, 0 + choices = [] + for i, response in enumerate(responses): + if tools: + result = chat_model.engine.template.extract_tool(response.response_text) + else: + result = response.response_text + + if isinstance(result, list): + tool_calls = [] + for tool in result: + function = Function(name=tool.name, arguments=tool.arguments) + tool_calls.append(FunctionCall(id=f"call_{uuid.uuid4().hex}", function=function)) + + response_message = ChatCompletionMessage(role=Role.ASSISTANT, tool_calls=tool_calls) + finish_reason = Finish.TOOL + else: + response_message = ChatCompletionMessage(role=Role.ASSISTANT, content=result) + finish_reason = Finish.STOP if response.finish_reason 
== "stop" else Finish.LENGTH + + choices.append(ChatCompletionResponseChoice(index=i, message=response_message, finish_reason=finish_reason)) + prompt_length = response.prompt_length + response_length += response.response_length + + usage = ChatCompletionResponseUsage( + prompt_tokens=prompt_length, + completion_tokens=response_length, + total_tokens=prompt_length + response_length, + ) + + return ChatCompletionResponse(id=completion_id, model=request.model, choices=choices, usage=usage) + + +async def create_stream_chat_completion_response( + request: "ChatCompletionRequest", chat_model: "ChatModel" +) -> AsyncGenerator[str, None]: + completion_id = f"chatcmpl-{uuid.uuid4().hex}" + input_messages, system, tools, images, videos, audios = _process_request(request) + if tools: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Cannot stream function calls.") + + if request.n > 1: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Cannot stream multiple responses.") + + yield _create_stream_chat_completion_chunk( + completion_id=completion_id, model=request.model, delta=ChatCompletionMessage(role=Role.ASSISTANT, content="") + ) + async for new_token in chat_model.astream_chat( + input_messages, + system, + tools, + images, + videos, + audios, + do_sample=request.do_sample, + temperature=request.temperature, + top_p=request.top_p, + max_new_tokens=request.max_tokens, + stop=request.stop, + ): + if len(new_token) != 0: + yield _create_stream_chat_completion_chunk( + completion_id=completion_id, model=request.model, delta=ChatCompletionMessage(content=new_token) + ) + + yield _create_stream_chat_completion_chunk( + completion_id=completion_id, model=request.model, delta=ChatCompletionMessage(), finish_reason=Finish.STOP + ) + yield "[DONE]" + + +async def create_score_evaluation_response( + request: "ScoreEvaluationRequest", chat_model: "ChatModel" +) -> "ScoreEvaluationResponse": + score_id = f"scoreval-{uuid.uuid4().hex}" + if len(request.messages) == 0: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid request") + + scores = await chat_model.aget_scores(request.messages, max_length=request.max_length) + return ScoreEvaluationResponse(id=score_id, model=request.model, scores=scores) diff --git a/post-training/LLaMA-Factory/src/llamafactory/api/common.py b/post-training/LLaMA-Factory/src/llamafactory/api/common.py new file mode 100644 index 0000000..f4d0c2f --- /dev/null +++ b/post-training/LLaMA-Factory/src/llamafactory/api/common.py @@ -0,0 +1,34 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +from typing import TYPE_CHECKING, Any + + +if TYPE_CHECKING: + from pydantic import BaseModel + + +def dictify(data: "BaseModel") -> dict[str, Any]: + try: # pydantic v2 + return data.model_dump(exclude_unset=True) + except AttributeError: # pydantic v1 + return data.dict(exclude_unset=True) + + +def jsonify(data: "BaseModel") -> str: + try: # pydantic v2 + return json.dumps(data.model_dump(exclude_unset=True), ensure_ascii=False) + except AttributeError: # pydantic v1 + return data.json(exclude_unset=True, ensure_ascii=False) diff --git a/post-training/LLaMA-Factory/src/llamafactory/api/protocol.py b/post-training/LLaMA-Factory/src/llamafactory/api/protocol.py new file mode 100644 index 0000000..ac9746e --- /dev/null +++ b/post-training/LLaMA-Factory/src/llamafactory/api/protocol.py @@ -0,0 +1,156 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time +from enum import Enum, unique +from typing import Any, Optional, Union + +from pydantic import BaseModel, Field +from typing_extensions import Literal + + +@unique +class Role(str, Enum): + USER = "user" + ASSISTANT = "assistant" + SYSTEM = "system" + FUNCTION = "function" + TOOL = "tool" + + +@unique +class Finish(str, Enum): + STOP = "stop" + LENGTH = "length" + TOOL = "tool_calls" + + +class ModelCard(BaseModel): + id: str + object: Literal["model"] = "model" + created: int = Field(default_factory=lambda: int(time.time())) + owned_by: Literal["owner"] = "owner" + + +class ModelList(BaseModel): + object: Literal["list"] = "list" + data: list[ModelCard] = [] + + +class Function(BaseModel): + name: str + arguments: str + + +class FunctionDefinition(BaseModel): + name: str + description: str + parameters: dict[str, Any] + + +class FunctionAvailable(BaseModel): + type: Literal["function", "code_interpreter"] = "function" + function: Optional[FunctionDefinition] = None + + +class FunctionCall(BaseModel): + id: str + type: Literal["function"] = "function" + function: Function + + +class URL(BaseModel): + url: str + detail: Literal["auto", "low", "high"] = "auto" + + +class MultimodalInputItem(BaseModel): + type: Literal["text", "image_url", "video_url", "audio_url"] + text: Optional[str] = None + image_url: Optional[URL] = None + video_url: Optional[URL] = None + audio_url: Optional[URL] = None + + +class ChatMessage(BaseModel): + role: Role + content: Optional[Union[str, list[MultimodalInputItem]]] = None + tool_calls: Optional[list[FunctionCall]] = None + + +class ChatCompletionMessage(BaseModel): + role: Optional[Role] = None + content: Optional[str] = None + tool_calls: Optional[list[FunctionCall]] = None + + +class ChatCompletionRequest(BaseModel): + model: str + messages: list[ChatMessage] + tools: Optional[list[FunctionAvailable]] = None + do_sample: Optional[bool] = None + temperature: Optional[float] = None + top_p: Optional[float] = None + n: int = 1 + max_tokens: Optional[int] = None + stop: Optional[Union[str, list[str]]] = None + stream: bool = False + + +class 
ChatCompletionResponseChoice(BaseModel): + index: int + message: ChatCompletionMessage + finish_reason: Finish + + +class ChatCompletionStreamResponseChoice(BaseModel): + index: int + delta: ChatCompletionMessage + finish_reason: Optional[Finish] = None + + +class ChatCompletionResponseUsage(BaseModel): + prompt_tokens: int + completion_tokens: int + total_tokens: int + + +class ChatCompletionResponse(BaseModel): + id: str + object: Literal["chat.completion"] = "chat.completion" + created: int = Field(default_factory=lambda: int(time.time())) + model: str + choices: list[ChatCompletionResponseChoice] + usage: ChatCompletionResponseUsage + + +class ChatCompletionStreamResponse(BaseModel): + id: str + object: Literal["chat.completion.chunk"] = "chat.completion.chunk" + created: int = Field(default_factory=lambda: int(time.time())) + model: str + choices: list[ChatCompletionStreamResponseChoice] + + +class ScoreEvaluationRequest(BaseModel): + model: str + messages: list[str] + max_length: Optional[int] = None + + +class ScoreEvaluationResponse(BaseModel): + id: str + object: Literal["score.evaluation"] = "score.evaluation" + model: str + scores: list[float] diff --git a/post-training/LLaMA-Factory/src/llamafactory/chat/__init__.py b/post-training/LLaMA-Factory/src/llamafactory/chat/__init__.py new file mode 100644 index 0000000..15d8b9b --- /dev/null +++ b/post-training/LLaMA-Factory/src/llamafactory/chat/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .base_engine import BaseEngine +from .chat_model import ChatModel + + +__all__ = ["BaseEngine", "ChatModel"] diff --git a/post-training/LLaMA-Factory/src/llamafactory/chat/base_engine.py b/post-training/LLaMA-Factory/src/llamafactory/chat/base_engine.py new file mode 100644 index 0000000..6d497c1 --- /dev/null +++ b/post-training/LLaMA-Factory/src/llamafactory/chat/base_engine.py @@ -0,0 +1,98 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
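# Editorial sketch, not part of the patch: building a ChatCompletionRequest from
# the pydantic models defined in protocol.py above, including one multimodal
# image item. Assumes pydantic v2 (model_validate); the model name and base64
# payload are placeholders.
from llamafactory.api.protocol import ChatCompletionRequest

request = ChatCompletionRequest.model_validate(
    {
        "model": "qwen2_vl",
        "messages": [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "Describe this image."},
                    {"type": "image_url", "image_url": {"url": "data:image/png;base64,<BASE64>"}},
                ],
            }
        ],
        "temperature": 0.7,
        "max_tokens": 256,
        "stream": False,
    }
)
assert request.messages[0].content[1].image_url is not None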
+ +from abc import ABC, abstractmethod +from collections.abc import AsyncGenerator +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any, Literal, Optional, Union + + +if TYPE_CHECKING: + from transformers import PreTrainedModel, PreTrainedTokenizer + from vllm import AsyncLLMEngine + + from ..data import Template + from ..data.mm_plugin import AudioInput, ImageInput, VideoInput + from ..extras.constants import EngineName + from ..hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments + + +@dataclass +class Response: + response_text: str + response_length: int + prompt_length: int + finish_reason: Literal["stop", "length"] + + +class BaseEngine(ABC): + r"""Base class for inference engine of chat models. + + Must implements async methods: chat(), stream_chat() and get_scores(). + """ + + name: "EngineName" + model: Union["PreTrainedModel", "AsyncLLMEngine"] + tokenizer: "PreTrainedTokenizer" + can_generate: bool + template: "Template" + generating_args: dict[str, Any] + + @abstractmethod + def __init__( + self, + model_args: "ModelArguments", + data_args: "DataArguments", + finetuning_args: "FinetuningArguments", + generating_args: "GeneratingArguments", + ) -> None: + r"""Initialize an inference engine.""" + ... + + @abstractmethod + async def chat( + self, + messages: list[dict[str, str]], + system: Optional[str] = None, + tools: Optional[str] = None, + images: Optional[list["ImageInput"]] = None, + videos: Optional[list["VideoInput"]] = None, + audios: Optional[list["AudioInput"]] = None, + **input_kwargs, + ) -> list["Response"]: + r"""Get a list of responses of the chat model.""" + ... + + @abstractmethod + async def stream_chat( + self, + messages: list[dict[str, str]], + system: Optional[str] = None, + tools: Optional[str] = None, + images: Optional[list["ImageInput"]] = None, + videos: Optional[list["VideoInput"]] = None, + audios: Optional[list["AudioInput"]] = None, + **input_kwargs, + ) -> AsyncGenerator[str, None]: + r"""Get the response token-by-token of the chat model.""" + ... + + @abstractmethod + async def get_scores( + self, + batch_input: list[str], + **input_kwargs, + ) -> list[float]: + r"""Get a list of scores of the reward model.""" + ... diff --git a/post-training/LLaMA-Factory/src/llamafactory/chat/chat_model.py b/post-training/LLaMA-Factory/src/llamafactory/chat/chat_model.py new file mode 100644 index 0000000..0022eed --- /dev/null +++ b/post-training/LLaMA-Factory/src/llamafactory/chat/chat_model.py @@ -0,0 +1,184 @@ +# Copyright 2025 THUDM and the LlamaFactory team. +# +# This code is inspired by the THUDM's ChatGLM implementation. +# https://github.com/THUDM/ChatGLM-6B/blob/main/cli_demo.py +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
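# Editorial sketch, not part of the patch: the minimal shape of a custom engine
# implementing the BaseEngine interface defined in base_engine.py above.
# "EchoEngine" is a made-up example that simply echoes the last user message.
from llamafactory.chat.base_engine import BaseEngine, Response


class EchoEngine(BaseEngine):
    def __init__(self, model_args, data_args, finetuning_args, generating_args) -> None:
        self.can_generate = True
        self.generating_args = generating_args.to_dict()

    async def chat(self, messages, system=None, tools=None, images=None, videos=None, audios=None, **input_kwargs):
        text = messages[-1]["content"]
        return [Response(response_text=text, response_length=len(text), prompt_length=0, finish_reason="stop")]

    async def stream_chat(self, messages, system=None, tools=None, images=None, videos=None, audios=None, **input_kwargs):
        for token in messages[-1]["content"].split():
            yield token + " "

    async def get_scores(self, batch_input, **input_kwargs):
        raise NotImplementedError("EchoEngine is not a reward model.")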
+ +import asyncio +import os +from collections.abc import AsyncGenerator, Generator +from threading import Thread +from typing import TYPE_CHECKING, Any, Optional + +from ..extras.constants import EngineName +from ..extras.misc import torch_gc +from ..hparams import get_infer_args +from .hf_engine import HuggingfaceEngine +from .sglang_engine import SGLangEngine +from .vllm_engine import VllmEngine + + +if TYPE_CHECKING: + from ..data.mm_plugin import AudioInput, ImageInput, VideoInput + from .base_engine import BaseEngine, Response + + +def _start_background_loop(loop: "asyncio.AbstractEventLoop") -> None: + asyncio.set_event_loop(loop) + loop.run_forever() + + +class ChatModel: + r"""General class for chat models. Backed by huggingface or vllm engines. + + Supports both sync and async methods. + Sync methods: chat(), stream_chat() and get_scores(). + Async methods: achat(), astream_chat() and aget_scores(). + """ + + def __init__(self, args: Optional[dict[str, Any]] = None) -> None: + model_args, data_args, finetuning_args, generating_args = get_infer_args(args) + if model_args.infer_backend == EngineName.HF: + self.engine: BaseEngine = HuggingfaceEngine(model_args, data_args, finetuning_args, generating_args) + elif model_args.infer_backend == EngineName.VLLM: + self.engine: BaseEngine = VllmEngine(model_args, data_args, finetuning_args, generating_args) + elif model_args.infer_backend == EngineName.SGLANG: + self.engine: BaseEngine = SGLangEngine(model_args, data_args, finetuning_args, generating_args) + else: + raise NotImplementedError(f"Unknown backend: {model_args.infer_backend}") + + self._loop = asyncio.new_event_loop() + self._thread = Thread(target=_start_background_loop, args=(self._loop,), daemon=True) + self._thread.start() + + def chat( + self, + messages: list[dict[str, str]], + system: Optional[str] = None, + tools: Optional[str] = None, + images: Optional[list["ImageInput"]] = None, + videos: Optional[list["VideoInput"]] = None, + audios: Optional[list["AudioInput"]] = None, + **input_kwargs, + ) -> list["Response"]: + r"""Get a list of responses of the chat model.""" + task = asyncio.run_coroutine_threadsafe( + self.achat(messages, system, tools, images, videos, audios, **input_kwargs), self._loop + ) + return task.result() + + async def achat( + self, + messages: list[dict[str, str]], + system: Optional[str] = None, + tools: Optional[str] = None, + images: Optional[list["ImageInput"]] = None, + videos: Optional[list["VideoInput"]] = None, + audios: Optional[list["AudioInput"]] = None, + **input_kwargs, + ) -> list["Response"]: + r"""Asynchronously get a list of responses of the chat model.""" + return await self.engine.chat(messages, system, tools, images, videos, audios, **input_kwargs) + + def stream_chat( + self, + messages: list[dict[str, str]], + system: Optional[str] = None, + tools: Optional[str] = None, + images: Optional[list["ImageInput"]] = None, + videos: Optional[list["VideoInput"]] = None, + audios: Optional[list["AudioInput"]] = None, + **input_kwargs, + ) -> Generator[str, None, None]: + r"""Get the response token-by-token of the chat model.""" + generator = self.astream_chat(messages, system, tools, images, videos, audios, **input_kwargs) + while True: + try: + task = asyncio.run_coroutine_threadsafe(generator.__anext__(), self._loop) + yield task.result() + except StopAsyncIteration: + break + + async def astream_chat( + self, + messages: list[dict[str, str]], + system: Optional[str] = None, + tools: Optional[str] = None, + images: 
Optional[list["ImageInput"]] = None, + videos: Optional[list["VideoInput"]] = None, + audios: Optional[list["AudioInput"]] = None, + **input_kwargs, + ) -> AsyncGenerator[str, None]: + r"""Asynchronously get the response token-by-token of the chat model.""" + async for new_token in self.engine.stream_chat( + messages, system, tools, images, videos, audios, **input_kwargs + ): + yield new_token + + def get_scores( + self, + batch_input: list[str], + **input_kwargs, + ) -> list[float]: + r"""Get a list of scores of the reward model.""" + task = asyncio.run_coroutine_threadsafe(self.aget_scores(batch_input, **input_kwargs), self._loop) + return task.result() + + async def aget_scores( + self, + batch_input: list[str], + **input_kwargs, + ) -> list[float]: + r"""Asynchronously get a list of scores of the reward model.""" + return await self.engine.get_scores(batch_input, **input_kwargs) + + +def run_chat() -> None: + if os.name != "nt": + try: + import readline # noqa: F401 + except ImportError: + print("Install `readline` for a better experience.") + + chat_model = ChatModel() + messages = [] + print("Welcome to the CLI application, use `clear` to remove the history, use `exit` to exit the application.") + + while True: + try: + query = input("\nUser: ") + except UnicodeDecodeError: + print("Detected decoding error at the inputs, please set the terminal encoding to utf-8.") + continue + except Exception: + raise + + if query.strip() == "exit": + break + + if query.strip() == "clear": + messages = [] + torch_gc() + print("History has been removed.") + continue + + messages.append({"role": "user", "content": query}) + print("Assistant: ", end="", flush=True) + + response = "" + for new_text in chat_model.stream_chat(messages): + print(new_text, end="", flush=True) + response += new_text + print() + messages.append({"role": "assistant", "content": response}) diff --git a/post-training/LLaMA-Factory/src/llamafactory/chat/hf_engine.py b/post-training/LLaMA-Factory/src/llamafactory/chat/hf_engine.py new file mode 100644 index 0000000..20a3c19 --- /dev/null +++ b/post-training/LLaMA-Factory/src/llamafactory/chat/hf_engine.py @@ -0,0 +1,413 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import asyncio +import os +from collections.abc import AsyncGenerator +from threading import Thread +from typing import TYPE_CHECKING, Any, Callable, Optional, Union + +import torch +from transformers import GenerationConfig, TextIteratorStreamer +from typing_extensions import override + +from ..data import get_template_and_fix_tokenizer +from ..extras import logging +from ..extras.constants import AUDIO_PLACEHOLDER, IMAGE_PLACEHOLDER, VIDEO_PLACEHOLDER, EngineName +from ..model import load_model, load_tokenizer +from .base_engine import BaseEngine, Response + + +if TYPE_CHECKING: + from transformers import PreTrainedModel, PreTrainedTokenizer, ProcessorMixin + from trl import PreTrainedModelWrapper + + from ..data import Template + from ..data.mm_plugin import AudioInput, ImageInput, VideoInput + from ..hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments + + +logger = logging.get_logger(__name__) + + +class HuggingfaceEngine(BaseEngine): + def __init__( + self, + model_args: "ModelArguments", + data_args: "DataArguments", + finetuning_args: "FinetuningArguments", + generating_args: "GeneratingArguments", + ) -> None: + self.name = EngineName.HF + self.can_generate = finetuning_args.stage == "sft" + tokenizer_module = load_tokenizer(model_args) + self.tokenizer = tokenizer_module["tokenizer"] + self.processor = tokenizer_module["processor"] + self.tokenizer.padding_side = "left" if self.can_generate else "right" + self.template = get_template_and_fix_tokenizer(self.tokenizer, data_args) + self.model = load_model( + self.tokenizer, model_args, finetuning_args, is_trainable=False, add_valuehead=(not self.can_generate) + ) # must after fixing tokenizer to resize vocab + self.generating_args = generating_args.to_dict() + try: + asyncio.get_event_loop() + except RuntimeError: + logger.warning_rank0_once("There is no current event loop, creating a new one.") + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + self.semaphore = asyncio.Semaphore(int(os.getenv("MAX_CONCURRENT", "1"))) + + @staticmethod + def _process_args( + model: "PreTrainedModel", + tokenizer: "PreTrainedTokenizer", + processor: Optional["ProcessorMixin"], + template: "Template", + generating_args: dict[str, Any], + messages: list[dict[str, str]], + system: Optional[str] = None, + tools: Optional[str] = None, + images: Optional[list["ImageInput"]] = None, + videos: Optional[list["VideoInput"]] = None, + audios: Optional[list["AudioInput"]] = None, + input_kwargs: Optional[dict[str, Any]] = {}, + ) -> tuple[dict[str, Any], int]: + mm_input_dict = {"images": [], "videos": [], "audios": [], "imglens": [0], "vidlens": [0], "audlens": [0]} + if images is not None: + mm_input_dict.update({"images": images, "imglens": [len(images)]}) + if not any(IMAGE_PLACEHOLDER in message["content"] for message in messages): + messages[0]["content"] = IMAGE_PLACEHOLDER * len(images) + messages[0]["content"] + + if videos is not None: + mm_input_dict.update({"videos": videos, "vidlens": [len(videos)]}) + if not any(VIDEO_PLACEHOLDER in message["content"] for message in messages): + messages[0]["content"] = VIDEO_PLACEHOLDER * len(videos) + messages[0]["content"] + + if audios is not None: + mm_input_dict.update({"audios": audios, "audlens": [len(audios)]}) + if not any(AUDIO_PLACEHOLDER in message["content"] for message in messages): + messages[0]["content"] = AUDIO_PLACEHOLDER * len(audios) + messages[0]["content"] + + messages = template.mm_plugin.process_messages( + messages, 
mm_input_dict["images"], mm_input_dict["videos"], mm_input_dict["audios"], processor + ) + paired_messages = messages + [{"role": "assistant", "content": ""}] + system = system or generating_args["default_system"] + prompt_ids, _ = template.encode_oneturn(tokenizer, paired_messages, system, tools) + prompt_ids, _ = template.mm_plugin.process_token_ids( + prompt_ids, + None, + mm_input_dict["images"], + mm_input_dict["videos"], + mm_input_dict["audios"], + tokenizer, + processor, + ) + prompt_length = len(prompt_ids) + inputs = torch.tensor([prompt_ids], device=model.device) + attention_mask = torch.ones_like(inputs, dtype=torch.bool) + + do_sample: Optional[bool] = input_kwargs.pop("do_sample", None) + temperature: Optional[float] = input_kwargs.pop("temperature", None) + top_p: Optional[float] = input_kwargs.pop("top_p", None) + top_k: Optional[float] = input_kwargs.pop("top_k", None) + num_return_sequences: int = input_kwargs.pop("num_return_sequences", 1) + repetition_penalty: Optional[float] = input_kwargs.pop("repetition_penalty", None) + length_penalty: Optional[float] = input_kwargs.pop("length_penalty", None) + skip_special_tokens: Optional[bool] = input_kwargs.pop("skip_special_tokens", None) + max_length: Optional[int] = input_kwargs.pop("max_length", None) + max_new_tokens: Optional[int] = input_kwargs.pop("max_new_tokens", None) + stop: Optional[Union[str, list[str]]] = input_kwargs.pop("stop", None) + + if stop is not None: + logger.warning_rank0("Stop parameter is not supported by the huggingface engine yet.") + + generating_args = generating_args.copy() + generating_args.update( + dict( + do_sample=do_sample if do_sample is not None else generating_args["do_sample"], + temperature=temperature if temperature is not None else generating_args["temperature"], + top_p=top_p if top_p is not None else generating_args["top_p"], + top_k=top_k if top_k is not None else generating_args["top_k"], + num_return_sequences=num_return_sequences, + repetition_penalty=repetition_penalty + if repetition_penalty is not None + else generating_args["repetition_penalty"], + length_penalty=length_penalty if length_penalty is not None else generating_args["length_penalty"], + skip_special_tokens=skip_special_tokens + if skip_special_tokens is not None + else generating_args["skip_special_tokens"], + eos_token_id=template.get_stop_token_ids(tokenizer), + pad_token_id=tokenizer.pad_token_id, + ) + ) + + if isinstance(num_return_sequences, int) and num_return_sequences > 1: # do_sample needs temperature > 0 + generating_args["do_sample"] = True + generating_args["temperature"] = generating_args["temperature"] or 1.0 + + if not generating_args["temperature"]: + generating_args["do_sample"] = False + + if not generating_args["do_sample"]: + generating_args.pop("temperature", None) + generating_args.pop("top_p", None) + + if max_length: + generating_args.pop("max_new_tokens", None) + generating_args["max_length"] = max_length + + if max_new_tokens: + generating_args.pop("max_length", None) + generating_args["max_new_tokens"] = max_new_tokens + + gen_kwargs = dict( + inputs=inputs, + attention_mask=attention_mask, + generation_config=GenerationConfig(**generating_args), + ) + + mm_inputs = template.mm_plugin.get_mm_inputs(**mm_input_dict, batch_ids=[prompt_ids], processor=processor) + for key, value in mm_inputs.items(): + if isinstance(value, list) and isinstance(value[0], torch.Tensor): # for pixtral inputs + value = torch.stack(value) # assume they have same sizes + elif ( + isinstance(value, list) and 
isinstance(value[0], list) and isinstance(value[0][0], torch.Tensor) + ): # for minicpmv inputs + value = torch.stack([torch.stack(v) for v in value]) + elif not isinstance(value, torch.Tensor): + value = torch.tensor(value) + + if torch.is_floating_point(value): # cast data dtype for paligemma + value = value.to(model.dtype) + + if key == "second_per_grid_ts": # qwen2.5vl special case + gen_kwargs[key] = value.tolist() + else: + gen_kwargs[key] = value.to(model.device) + + if getattr(model.config, "model_type", None) in ["minicpmv", "minicpmo"]: + gen_kwargs["input_ids"] = inputs + gen_kwargs["tokenizer"] = tokenizer + if "audio_feature_lens" in mm_inputs: + gen_kwargs["audio_feature_lens"] = mm_inputs["audio_feature_lens"] + + gen_kwargs.pop("image_sizes", None) + + return gen_kwargs, prompt_length + + @staticmethod + @torch.inference_mode() + def _chat( + model: "PreTrainedModel", + tokenizer: "PreTrainedTokenizer", + processor: Optional["ProcessorMixin"], + template: "Template", + generating_args: dict[str, Any], + messages: list[dict[str, str]], + system: Optional[str] = None, + tools: Optional[str] = None, + images: Optional[list["ImageInput"]] = None, + videos: Optional[list["VideoInput"]] = None, + audios: Optional[list["AudioInput"]] = None, + input_kwargs: Optional[dict[str, Any]] = {}, + ) -> list["Response"]: + gen_kwargs, prompt_length = HuggingfaceEngine._process_args( + model, + tokenizer, + processor, + template, + generating_args, + messages, + system, + tools, + images, + videos, + audios, + input_kwargs, + ) + generate_output = model.generate(**gen_kwargs) + if isinstance(generate_output, tuple): + generate_output = generate_output[1][0] # post-process the minicpm_o output + + response_ids = generate_output[:, prompt_length:] + response = tokenizer.batch_decode( + response_ids, + skip_special_tokens=getattr(gen_kwargs["generation_config"], "skip_special_tokens", True), + clean_up_tokenization_spaces=True, + ) + results = [] + for i in range(len(response)): + eos_index = (response_ids[i] == tokenizer.eos_token_id).nonzero() + response_length = (eos_index[0].item() + 1) if len(eos_index) else len(response_ids[i]) + results.append( + Response( + response_text=response[i], + response_length=response_length, + prompt_length=prompt_length, + finish_reason="stop" if len(eos_index) else "length", + ) + ) + + return results + + @staticmethod + @torch.inference_mode() + def _stream_chat( + model: "PreTrainedModel", + tokenizer: "PreTrainedTokenizer", + processor: Optional["ProcessorMixin"], + template: "Template", + generating_args: dict[str, Any], + messages: list[dict[str, str]], + system: Optional[str] = None, + tools: Optional[str] = None, + images: Optional[list["ImageInput"]] = None, + videos: Optional[list["VideoInput"]] = None, + audios: Optional[list["AudioInput"]] = None, + input_kwargs: Optional[dict[str, Any]] = {}, + ) -> Callable[[], str]: + gen_kwargs, _ = HuggingfaceEngine._process_args( + model, + tokenizer, + processor, + template, + generating_args, + messages, + system, + tools, + images, + videos, + audios, + input_kwargs, + ) + streamer = TextIteratorStreamer( + tokenizer, + skip_prompt=True, + skip_special_tokens=getattr(gen_kwargs["generation_config"], "skip_special_tokens", True), + ) + gen_kwargs["streamer"] = streamer + thread = Thread(target=model.generate, kwargs=gen_kwargs, daemon=True) + thread.start() + + def stream(): + try: + return streamer.__next__() + except StopIteration: + raise StopAsyncIteration() + + return stream + + @staticmethod + 
@torch.inference_mode() + def _get_scores( + model: "PreTrainedModelWrapper", + tokenizer: "PreTrainedTokenizer", + batch_input: list[str], + input_kwargs: Optional[dict[str, Any]] = {}, + ) -> list[float]: + max_length: Optional[int] = input_kwargs.pop("max_length", None) + device = getattr(model.pretrained_model, "device", "cuda") + inputs: dict[str, torch.Tensor] = tokenizer( + batch_input, + padding=True, + truncation=True, + max_length=max_length or getattr(model.config, "max_position_embeddings", 1024), + return_tensors="pt", + add_special_tokens=False, + ).to(device) + values: torch.Tensor = model(**inputs, return_dict=True, use_cache=False)[-1] + scores = values.gather(dim=-1, index=(inputs["attention_mask"].sum(dim=-1, keepdim=True) - 1)) + return scores + + @override + async def chat( + self, + messages: list[dict[str, str]], + system: Optional[str] = None, + tools: Optional[str] = None, + images: Optional[list["ImageInput"]] = None, + videos: Optional[list["VideoInput"]] = None, + audios: Optional[list["AudioInput"]] = None, + **input_kwargs, + ) -> list["Response"]: + if not self.can_generate: + raise ValueError("The current model does not support `chat`.") + + input_args = ( + self.model, + self.tokenizer, + self.processor, + self.template, + self.generating_args, + messages, + system, + tools, + images, + videos, + audios, + input_kwargs, + ) + async with self.semaphore: + return await asyncio.to_thread(self._chat, *input_args) + + @override + async def stream_chat( + self, + messages: list[dict[str, str]], + system: Optional[str] = None, + tools: Optional[str] = None, + images: Optional[list["ImageInput"]] = None, + videos: Optional[list["VideoInput"]] = None, + audios: Optional[list["AudioInput"]] = None, + **input_kwargs, + ) -> AsyncGenerator[str, None]: + if not self.can_generate: + raise ValueError("The current model does not support `stream_chat`.") + + input_args = ( + self.model, + self.tokenizer, + self.processor, + self.template, + self.generating_args, + messages, + system, + tools, + images, + videos, + audios, + input_kwargs, + ) + async with self.semaphore: + stream = self._stream_chat(*input_args) + while True: + try: + yield await asyncio.to_thread(stream) + except StopAsyncIteration: + break + + @override + async def get_scores( + self, + batch_input: list[str], + **input_kwargs, + ) -> list[float]: + if self.can_generate: + raise ValueError("Cannot get scores using an auto-regressive model.") + + input_args = (self.model, self.tokenizer, batch_input, input_kwargs) + async with self.semaphore: + return await asyncio.to_thread(self._get_scores, *input_args) diff --git a/post-training/LLaMA-Factory/src/llamafactory/chat/sglang_engine.py b/post-training/LLaMA-Factory/src/llamafactory/chat/sglang_engine.py new file mode 100644 index 0000000..3fc3aeb --- /dev/null +++ b/post-training/LLaMA-Factory/src/llamafactory/chat/sglang_engine.py @@ -0,0 +1,275 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
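# Editorial sketch, not part of the patch: scoring text with a value-head reward
# model through the get_scores path of the HuggingFace engine above. Assumes a
# checkpoint trained with a non-generating stage such as "rm"; the checkpoint
# path and template name are placeholders.
from llamafactory.chat import ChatModel

scorer = ChatModel(
    {
        "model_name_or_path": "saves/reward_model",
        "template": "qwen",
        "stage": "rm",  # can_generate becomes False, so the engine loads a value head
    }
)
scores = scorer.get_scores(["The capital of France is Paris."], max_length=1024)
print(scores)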
+ +import asyncio +import atexit +import json +from collections.abc import AsyncGenerator, AsyncIterator, Sequence +from typing import TYPE_CHECKING, Any, Optional, Union + +import requests +from typing_extensions import override + +from ..data import get_template_and_fix_tokenizer +from ..extras import logging +from ..extras.constants import AUDIO_PLACEHOLDER, IMAGE_PLACEHOLDER, VIDEO_PLACEHOLDER, EngineName +from ..extras.misc import get_device_count, torch_gc +from ..extras.packages import is_sglang_available +from ..hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments +from ..model import load_config, load_tokenizer +from ..model.model_utils.quantization import QuantizationMethod +from .base_engine import BaseEngine, Response + + +if is_sglang_available(): + from sglang.utils import launch_server_cmd, terminate_process, wait_for_server # type: ignore + + +if TYPE_CHECKING: + from ..data.mm_plugin import AudioInput, ImageInput, VideoInput + + +logger = logging.get_logger(__name__) + + +class SGLangEngine(BaseEngine): + """Inference engine for SGLang models. + + This class wraps the SGLang engine to provide a consistent interface for text generation + that matches LLaMA Factory's requirements. It uses the SGLang HTTP server approach for + better interaction and performance. The engine launches a server process and communicates + with it via HTTP requests. + + For more details on the SGLang HTTP server approach, see: + https://docs.sglang.ai/backend/send_request.html + """ + + def __init__( + self, + model_args: "ModelArguments", + data_args: "DataArguments", + finetuning_args: "FinetuningArguments", + generating_args: "GeneratingArguments", + ) -> None: + self.name = EngineName.SGLANG + self.model_args = model_args + config = load_config(model_args) # may download model from ms hub + if getattr(config, "quantization_config", None): # gptq models should use float16 + quantization_config: dict[str, Any] = getattr(config, "quantization_config", None) + quant_method = quantization_config.get("quant_method", "") + if quant_method == QuantizationMethod.GPTQ and model_args.infer_dtype == "auto": + model_args.infer_dtype = "float16" + + self.can_generate = finetuning_args.stage == "sft" + tokenizer_module = load_tokenizer(model_args) + self.tokenizer = tokenizer_module["tokenizer"] + self.processor = tokenizer_module["processor"] + self.tokenizer.padding_side = "left" + self.template = get_template_and_fix_tokenizer(self.tokenizer, data_args) + self.template.mm_plugin.expand_mm_tokens = False # for sglang generate + self.generating_args = generating_args.to_dict() + + launch_cmd = [ + "python3 -m sglang.launch_server", + f"--model-path {model_args.model_name_or_path}", + f"--dtype {model_args.infer_dtype}", + f"--context-length {model_args.sglang_maxlen}", + f"--mem-fraction-static {model_args.sglang_mem_fraction}", + f"--tp-size {model_args.sglang_tp_size if model_args.sglang_tp_size != -1 else get_device_count() or 1}", + f"--download-dir {model_args.cache_dir}", + "--log-level error", + ] + launch_cmd = " ".join(launch_cmd) + logger.info_rank0(f"Starting SGLang server with command: {launch_cmd}") + try: + torch_gc() + self.server_process, port = launch_server_cmd(launch_cmd) + self.base_url = f"http://localhost:{port}" + atexit.register(self._cleanup_server) + + logger.info_rank0(f"Waiting for SGLang server to be ready at {self.base_url}") + wait_for_server(self.base_url, timeout=300) + logger.info_rank0(f"SGLang server initialized successfully at 
{self.base_url}") + try: + response = requests.get(f"{self.base_url}/get_model_info", timeout=5) + if response.status_code == 200: + model_info = response.json() + logger.info(f"SGLang server model info: {model_info}") + except Exception as e: + logger.debug(f"Note: could not get model info: {str(e)}") + + except Exception as e: + logger.error(f"Failed to start SGLang server: {str(e)}") + self._cleanup_server() # make sure to clean up any started process + raise RuntimeError(f"SGLang server initialization failed: {str(e)}.") + + def _cleanup_server(self): + r"""Clean up the server process when the engine is destroyed.""" + if hasattr(self, "server_process") and self.server_process: + try: + logger.info("Terminating SGLang server process") + terminate_process(self.server_process) + logger.info("SGLang server process terminated") + except Exception as e: + logger.warning(f"Error terminating SGLang server: {str(e)}") + + async def _generate( + self, + messages: list[dict[str, str]], + system: Optional[str] = None, + tools: Optional[str] = None, + images: Optional[list["ImageInput"]] = None, + videos: Optional[list["VideoInput"]] = None, + audios: Optional[list["AudioInput"]] = None, + **input_kwargs, + ) -> AsyncIterator[dict[str, Any]]: + if images is not None and not any(IMAGE_PLACEHOLDER in message["content"] for message in messages): + messages[0]["content"] = IMAGE_PLACEHOLDER * len(images) + messages[0]["content"] + + if videos is not None and not any(VIDEO_PLACEHOLDER in message["content"] for message in messages): + messages[0]["content"] = VIDEO_PLACEHOLDER * len(videos) + messages[0]["content"] + + if audios is not None and not any(AUDIO_PLACEHOLDER in message["content"] for message in messages): + messages[0]["content"] = AUDIO_PLACEHOLDER * len(audios) + messages[0]["content"] + + messages = self.template.mm_plugin.process_messages( + messages, images or [], videos or [], audios or [], self.processor + ) + paired_messages = messages + [{"role": "assistant", "content": ""}] + system = system or self.generating_args["default_system"] + prompt_ids, _ = self.template.encode_oneturn(self.tokenizer, paired_messages, system, tools) + prompt_length = len(prompt_ids) + + temperature: Optional[float] = input_kwargs.pop("temperature", None) + top_p: Optional[float] = input_kwargs.pop("top_p", None) + top_k: Optional[float] = input_kwargs.pop("top_k", None) + num_return_sequences: int = input_kwargs.pop("num_return_sequences", 1) + repetition_penalty: Optional[float] = input_kwargs.pop("repetition_penalty", None) + skip_special_tokens: Optional[bool] = input_kwargs.pop("skip_special_tokens", None) + max_length: Optional[int] = input_kwargs.pop("max_length", None) + max_new_tokens: Optional[int] = input_kwargs.pop("max_new_tokens", None) + stop: Optional[Union[str, list[str]]] = input_kwargs.pop("stop", None) + + if num_return_sequences != 1: + raise NotImplementedError("SGLang only supports n=1.") + + if "max_new_tokens" in self.generating_args: + max_tokens = self.generating_args["max_new_tokens"] + elif "max_length" in self.generating_args: + if self.generating_args["max_length"] > prompt_length: + max_tokens = self.generating_args["max_length"] - prompt_length + else: + max_tokens = 1 + + if max_length: + max_tokens = max_length - prompt_length if max_length > prompt_length else 1 + + if max_new_tokens: + max_tokens = max_new_tokens + + sampling_params = { + "temperature": temperature if temperature is not None else self.generating_args["temperature"], + "top_p": (top_p if top_p is not 
None else self.generating_args["top_p"]) or 1.0, # top_p must > 0 + "top_k": (top_k if top_k is not None else self.generating_args["top_k"]) or -1, # top_k must > 0 + "stop": stop, + "stop_token_ids": self.template.get_stop_token_ids(self.tokenizer), + "max_new_tokens": max_tokens, + "repetition_penalty": ( + repetition_penalty if repetition_penalty is not None else self.generating_args["repetition_penalty"] + ) + or 1.0, # repetition_penalty must > 0 + "skip_special_tokens": skip_special_tokens + if skip_special_tokens is not None + else self.generating_args["skip_special_tokens"], + } + + def stream_request(): + json_data = { + "input_ids": prompt_ids, + "sampling_params": sampling_params, + "stream": True, + } + response = requests.post(f"{self.base_url}/generate", json=json_data, stream=True) + if response.status_code != 200: + raise RuntimeError(f"SGLang server error: {response.status_code}, {response.text}") + + for chunk in response.iter_lines(decode_unicode=False): + chunk = str(chunk.decode("utf-8")) + if chunk == "data: [DONE]": + break + + if chunk and chunk.startswith("data:"): + yield json.loads(chunk[5:].strip("\n")) + + return await asyncio.to_thread(stream_request) + + @override + async def chat( + self, + messages: Sequence[dict[str, str]], + system: Optional[str] = None, + tools: Optional[str] = None, + images: Optional[Sequence["ImageInput"]] = None, + videos: Optional[Sequence["VideoInput"]] = None, + audios: Optional[Sequence["AudioInput"]] = None, + **input_kwargs, + ) -> list["Response"]: + final_output = None + generator = await self._generate(messages, system, tools, images, videos, audios, **input_kwargs) + for request_output in generator: + final_output = request_output + + results = [ + Response( + response_text=final_output["text"], + response_length=final_output["meta_info"]["completion_tokens"], + prompt_length=final_output["meta_info"]["prompt_tokens"], + finish_reason="stop" if final_output["meta_info"]["finish_reason"] == "stop" else "length", + ) + ] + return results + + @override + async def stream_chat( + self, + messages: list[dict[str, str]], + system: Optional[str] = None, + tools: Optional[str] = None, + images: Optional[list["ImageInput"]] = None, + videos: Optional[list["VideoInput"]] = None, + audios: Optional[list["AudioInput"]] = None, + **input_kwargs, + ) -> AsyncGenerator[str, None]: + generated_text = "" + generator = await self._generate(messages, system, tools, images, videos, audios, **input_kwargs) + for result in generator: + delta_text = result["text"][len(generated_text) :] + generated_text = result["text"] + yield delta_text + + @override + async def get_scores( + self, + batch_input: list[str], + **input_kwargs, + ) -> list[float]: + raise NotImplementedError("SGLang engine does not support `get_scores`.") + + def __del__(self): + r"""Ensure server is cleaned up when object is deleted.""" + self._cleanup_server() + try: + atexit.unregister(self._cleanup_server) + except Exception: + pass diff --git a/post-training/LLaMA-Factory/src/llamafactory/chat/vllm_engine.py b/post-training/LLaMA-Factory/src/llamafactory/chat/vllm_engine.py new file mode 100644 index 0000000..1100fc8 --- /dev/null +++ b/post-training/LLaMA-Factory/src/llamafactory/chat/vllm_engine.py @@ -0,0 +1,264 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import uuid +from collections.abc import AsyncGenerator, AsyncIterator +from typing import TYPE_CHECKING, Any, Optional, Union + +from typing_extensions import override + +from ..data import get_template_and_fix_tokenizer +from ..extras import logging +from ..extras.constants import AUDIO_PLACEHOLDER, IMAGE_PLACEHOLDER, VIDEO_PLACEHOLDER, EngineName +from ..extras.misc import get_device_count +from ..extras.packages import is_vllm_available +from ..model import load_config, load_tokenizer +from ..model.model_utils.quantization import QuantizationMethod +from ..model.model_utils.visual import LlavaMultiModalProjectorForYiVLForVLLM +from .base_engine import BaseEngine, Response + + +if is_vllm_available(): + from vllm import AsyncEngineArgs, AsyncLLMEngine, RequestOutput, SamplingParams + from vllm.lora.request import LoRARequest + + +if TYPE_CHECKING: + from ..data.mm_plugin import AudioInput, ImageInput, VideoInput + from ..hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments + + +logger = logging.get_logger(__name__) + + +class VllmEngine(BaseEngine): + def __init__( + self, + model_args: "ModelArguments", + data_args: "DataArguments", + finetuning_args: "FinetuningArguments", + generating_args: "GeneratingArguments", + ) -> None: + self.name = EngineName.VLLM + self.model_args = model_args + config = load_config(model_args) # may download model from ms hub + if getattr(config, "quantization_config", None): # gptq models should use float16 + quantization_config: dict[str, Any] = getattr(config, "quantization_config", None) + quant_method = quantization_config.get("quant_method", "") + if quant_method == QuantizationMethod.GPTQ and model_args.infer_dtype == "auto": + model_args.infer_dtype = "float16" + + self.can_generate = finetuning_args.stage == "sft" + tokenizer_module = load_tokenizer(model_args) + self.tokenizer = tokenizer_module["tokenizer"] + self.processor = tokenizer_module["processor"] + self.tokenizer.padding_side = "left" + self.template = get_template_and_fix_tokenizer(self.tokenizer, data_args) + self.template.mm_plugin.expand_mm_tokens = False # for vllm generate + self.generating_args = generating_args.to_dict() + + engine_args = { + "model": model_args.model_name_or_path, + "trust_remote_code": model_args.trust_remote_code, + "download_dir": model_args.cache_dir, + "dtype": model_args.infer_dtype, + "max_model_len": model_args.vllm_maxlen, + "tensor_parallel_size": get_device_count() or 1, + "gpu_memory_utilization": model_args.vllm_gpu_util, + "disable_log_stats": True, + "disable_log_requests": True, + "enforce_eager": model_args.vllm_enforce_eager, + "enable_lora": model_args.adapter_name_or_path is not None, + "max_lora_rank": model_args.vllm_max_lora_rank, + } + if self.template.mm_plugin.__class__.__name__ != "BasePlugin": + engine_args["limit_mm_per_prompt"] = {"image": 4, "video": 2, "audio": 2} + + if isinstance(model_args.vllm_config, dict): + engine_args.update(model_args.vllm_config) + + if getattr(config, "is_yi_vl_derived_model", None): + import vllm.model_executor.models.llava + + logger.info_rank0("Detected 
Yi-VL model, applying projector patch.") + vllm.model_executor.models.llava.LlavaMultiModalProjector = LlavaMultiModalProjectorForYiVLForVLLM + + self.model = AsyncLLMEngine.from_engine_args(AsyncEngineArgs(**engine_args)) + if model_args.adapter_name_or_path is not None: + self.lora_request = LoRARequest("default", 1, model_args.adapter_name_or_path[0]) + else: + self.lora_request = None + + async def _generate( + self, + messages: list[dict[str, str]], + system: Optional[str] = None, + tools: Optional[str] = None, + images: Optional[list["ImageInput"]] = None, + videos: Optional[list["VideoInput"]] = None, + audios: Optional[list["AudioInput"]] = None, + **input_kwargs, + ) -> AsyncIterator["RequestOutput"]: + request_id = f"chatcmpl-{uuid.uuid4().hex}" + if images is not None and not any(IMAGE_PLACEHOLDER in message["content"] for message in messages): + messages[0]["content"] = IMAGE_PLACEHOLDER * len(images) + messages[0]["content"] + + if videos is not None and not any(VIDEO_PLACEHOLDER in message["content"] for message in messages): + messages[0]["content"] = VIDEO_PLACEHOLDER * len(videos) + messages[0]["content"] + + if audios is not None and not any(AUDIO_PLACEHOLDER in message["content"] for message in messages): + messages[0]["content"] = AUDIO_PLACEHOLDER * len(audios) + messages[0]["content"] + + messages = self.template.mm_plugin.process_messages( + messages, images or [], videos or [], audios or [], self.processor + ) + paired_messages = messages + [{"role": "assistant", "content": ""}] + system = system or self.generating_args["default_system"] + prompt_ids, _ = self.template.encode_oneturn(self.tokenizer, paired_messages, system, tools) + prompt_length = len(prompt_ids) + + temperature: Optional[float] = input_kwargs.pop("temperature", None) + top_p: Optional[float] = input_kwargs.pop("top_p", None) + top_k: Optional[float] = input_kwargs.pop("top_k", None) + num_return_sequences: int = input_kwargs.pop("num_return_sequences", 1) + repetition_penalty: Optional[float] = input_kwargs.pop("repetition_penalty", None) + length_penalty: Optional[float] = input_kwargs.pop("length_penalty", None) + skip_special_tokens: Optional[bool] = input_kwargs.pop("skip_special_tokens", None) + max_length: Optional[int] = input_kwargs.pop("max_length", None) + max_new_tokens: Optional[int] = input_kwargs.pop("max_new_tokens", None) + stop: Optional[Union[str, list[str]]] = input_kwargs.pop("stop", None) + + if length_penalty is not None: + logger.warning_rank0("Length penalty is not supported by the vllm engine yet.") + + if "max_new_tokens" in self.generating_args: + max_tokens = self.generating_args["max_new_tokens"] + elif "max_length" in self.generating_args: + if self.generating_args["max_length"] > prompt_length: + max_tokens = self.generating_args["max_length"] - prompt_length + else: + max_tokens = 1 + + if max_length: + max_tokens = max_length - prompt_length if max_length > prompt_length else 1 + + if max_new_tokens: + max_tokens = max_new_tokens + + sampling_params = SamplingParams( + n=num_return_sequences, + repetition_penalty=( + repetition_penalty if repetition_penalty is not None else self.generating_args["repetition_penalty"] + ) + or 1.0, # repetition_penalty must > 0 + temperature=temperature if temperature is not None else self.generating_args["temperature"], + top_p=(top_p if top_p is not None else self.generating_args["top_p"]) or 1.0, # top_p must > 0 + top_k=(top_k if top_k is not None else self.generating_args["top_k"]) or -1, # top_k must > 0 + stop=stop, + 
stop_token_ids=self.template.get_stop_token_ids(self.tokenizer), + max_tokens=max_tokens, + skip_special_tokens=skip_special_tokens + if skip_special_tokens is not None + else self.generating_args["skip_special_tokens"], + ) + + if images is not None: # add image features + multi_modal_data = { + "image": self.template.mm_plugin._regularize_images( + images, + image_max_pixels=self.model_args.image_max_pixels, + image_min_pixels=self.model_args.image_min_pixels, + )["images"] + } + elif videos is not None: + multi_modal_data = { + "video": self.template.mm_plugin._regularize_videos( + videos, + image_max_pixels=self.model_args.video_max_pixels, + image_min_pixels=self.model_args.video_min_pixels, + video_fps=self.model_args.video_fps, + video_maxlen=self.model_args.video_maxlen, + )["videos"] + } + elif audios is not None: + audio_data = self.template.mm_plugin._regularize_audios( + audios, + sampling_rate=self.model_args.audio_sampling_rate, + ) + multi_modal_data = {"audio": zip(audio_data["audios"], audio_data["sampling_rates"])} + else: + multi_modal_data = None + + result_generator = self.model.generate( + {"prompt_token_ids": prompt_ids, "multi_modal_data": multi_modal_data}, + sampling_params=sampling_params, + request_id=request_id, + lora_request=self.lora_request, + ) + return result_generator + + @override + async def chat( + self, + messages: list[dict[str, str]], + system: Optional[str] = None, + tools: Optional[str] = None, + images: Optional[list["ImageInput"]] = None, + videos: Optional[list["VideoInput"]] = None, + audios: Optional[list["AudioInput"]] = None, + **input_kwargs, + ) -> list["Response"]: + final_output = None + generator = await self._generate(messages, system, tools, images, videos, audios, **input_kwargs) + async for request_output in generator: + final_output = request_output + + results = [] + for output in final_output.outputs: + results.append( + Response( + response_text=output.text, + response_length=len(output.token_ids), + prompt_length=len(final_output.prompt_token_ids), + finish_reason=output.finish_reason, + ) + ) + + return results + + @override + async def stream_chat( + self, + messages: list[dict[str, str]], + system: Optional[str] = None, + tools: Optional[str] = None, + images: Optional[list["ImageInput"]] = None, + videos: Optional[list["VideoInput"]] = None, + audios: Optional[list["AudioInput"]] = None, + **input_kwargs, + ) -> AsyncGenerator[str, None]: + generated_text = "" + generator = await self._generate(messages, system, tools, images, videos, audios, **input_kwargs) + async for result in generator: + delta_text = result.outputs[0].text[len(generated_text) :] + generated_text = result.outputs[0].text + yield delta_text + + @override + async def get_scores( + self, + batch_input: list[str], + **input_kwargs, + ) -> list[float]: + raise NotImplementedError("vLLM engine does not support `get_scores`.") diff --git a/post-training/LLaMA-Factory/src/llamafactory/cli.py b/post-training/LLaMA-Factory/src/llamafactory/cli.py new file mode 100644 index 0000000..f9c32d4 --- /dev/null +++ b/post-training/LLaMA-Factory/src/llamafactory/cli.py @@ -0,0 +1,124 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import subprocess
+import sys
+from copy import deepcopy
+from functools import partial
+
+
+USAGE = (
+    "-" * 70
+    + "\n"
+    + "| Usage: |\n"
+    + "| llamafactory-cli api -h: launch an OpenAI-style API server |\n"
+    + "| llamafactory-cli chat -h: launch a chat interface in CLI |\n"
+    + "| llamafactory-cli eval -h: evaluate models |\n"
+    + "| llamafactory-cli export -h: merge LoRA adapters and export model |\n"
+    + "| llamafactory-cli train -h: train models |\n"
+    + "| llamafactory-cli webchat -h: launch a chat interface in Web UI |\n"
+    + "| llamafactory-cli webui: launch LlamaBoard |\n"
+    + "| llamafactory-cli version: show version info |\n"
+    + "-" * 70
+)
+
+
+def main():
+    from . import launcher
+    from .api.app import run_api
+    from .chat.chat_model import run_chat
+    from .eval.evaluator import run_eval
+    from .extras import logging
+    from .extras.env import VERSION, print_env
+    from .extras.misc import find_available_port, get_device_count, is_env_enabled, use_ray
+    from .train.tuner import export_model, run_exp
+    from .webui.interface import run_web_demo, run_web_ui
+
+    logger = logging.get_logger(__name__)
+
+    WELCOME = (
+        "-" * 58
+        + "\n"
+        + f"| Welcome to LLaMA Factory, version {VERSION}"
+        + " " * (21 - len(VERSION))
+        + "|\n|"
+        + " " * 56
+        + "|\n"
+        + "| Project page: https://github.com/hiyouga/LLaMA-Factory |\n"
+        + "-" * 58
+    )
+
+    COMMAND_MAP = {
+        "api": run_api,
+        "chat": run_chat,
+        "env": print_env,
+        "eval": run_eval,
+        "export": export_model,
+        "train": run_exp,
+        "webchat": run_web_demo,
+        "webui": run_web_ui,
+        "version": partial(print, WELCOME),
+        "help": partial(print, USAGE),
+    }
+
+    command = sys.argv.pop(1) if len(sys.argv) != 1 else "help"
+    if command == "train" and (is_env_enabled("FORCE_TORCHRUN") or (get_device_count() > 1 and not use_ray())):
+        # launch distributed training
+        nnodes = os.getenv("NNODES", "1")
+        node_rank = os.getenv("NODE_RANK", "0")
+        nproc_per_node = os.getenv("NPROC_PER_NODE", str(get_device_count()))
+        master_addr = os.getenv("MASTER_ADDR", "127.0.0.1")
+        master_port = os.getenv("MASTER_PORT", str(find_available_port()))
+        logger.info_rank0(f"Initializing {nproc_per_node} distributed tasks at: {master_addr}:{master_port}")
+        if int(nnodes) > 1:
+            print(f"Multi-node training enabled: num nodes: {nnodes}, node rank: {node_rank}")
+
+        env = deepcopy(os.environ)
+        if is_env_enabled("OPTIM_TORCH", "1"):
+            # optimize DDP, see https://zhuanlan.zhihu.com/p/671834539
+            env["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+            env["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"
+
+        # NOTE: DO NOT USE shell=True to avoid security risk
+        process = subprocess.run(
+            (
+                "torchrun --nnodes {nnodes} --node_rank {node_rank} --nproc_per_node {nproc_per_node} "
+                "--master_addr {master_addr} --master_port {master_port} {file_name} {args}"
+            )
+            .format(
+                nnodes=nnodes,
+                node_rank=node_rank,
+                nproc_per_node=nproc_per_node,
+                master_addr=master_addr,
+                master_port=master_port,
+                file_name=launcher.__file__,
+                args=" ".join(sys.argv[1:]),
+            )
+            .split(),
+            env=env,
+            check=True,
+        )
+        sys.exit(process.returncode)
+    elif command in COMMAND_MAP:
+        COMMAND_MAP[command]()
+    else:
+        print(f"Unknown command: {command}.\n{USAGE}")
+
+
+if __name__ == "__main__":
+    from multiprocessing import freeze_support
+
+    freeze_support()
+    main()
diff --git a/post-training/LLaMA-Factory/src/llamafactory/eval/__init__.py b/post-training/LLaMA-Factory/src/llamafactory/eval/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/post-training/LLaMA-Factory/src/llamafactory/eval/evaluator.py b/post-training/LLaMA-Factory/src/llamafactory/eval/evaluator.py
new file mode 100644
index 0000000..7729c59
--- /dev/null
+++ b/post-training/LLaMA-Factory/src/llamafactory/eval/evaluator.py
@@ -0,0 +1,158 @@
+# Copyright 2025 the LlamaFactory team.
+#
+# This code is inspired by Dan's test library.
+# https://github.com/hendrycks/test/blob/master/evaluate_flan.py
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# MIT License
+#
+# Copyright (c) 2020 Dan Hendrycks
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
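The evaluator added below never parses generated text for these MMLU-style multiple-choice tasks: its batch_inference method takes the logits at the last non-padded prompt position, keeps only the entries corresponding to the choice-letter tokens (A/B/C/D), and picks the argmax. A minimal, self-contained sketch of that scoring idea, using hypothetical toy token ids and logits rather than the real tokenizer and model:

import torch

# Toy stand-ins (hypothetical): pretend ids 4..7 are the tokenizer ids of "A".."D".
choice_token_ids = [4, 5, 6, 7]
choice_letters = ["A", "B", "C", "D"]

# Fake last-position logits for a batch of two prompts, shape (batch, vocab_size=8).
last_token_logits = torch.tensor(
    [
        [0.1, 0.0, 0.2, 0.3, 2.5, 0.4, 0.1, 0.0],  # highest choice logit at id 4 -> "A"
        [0.0, 0.1, 0.0, 0.2, 0.3, 0.1, 3.0, 0.2],  # highest choice logit at id 6 -> "C"
    ]
)

# Restrict the distribution to the four choice tokens, then take the argmax per example.
choice_probs = torch.softmax(last_token_logits[:, choice_token_ids], dim=-1)
predictions = [choice_letters[i] for i in choice_probs.argmax(dim=-1).tolist()]
print(predictions)  # ['A', 'C']

Scoring this way keeps evaluation to a single forward pass per batch and avoids answer-extraction heuristics on generated text.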
+ +import json +import os +from typing import TYPE_CHECKING, Any, Optional + +import numpy as np +import torch +from datasets import load_dataset +from tqdm import tqdm, trange +from transformers.utils import cached_file + +from ..data import get_template_and_fix_tokenizer +from ..extras.constants import CHOICES, SUBJECTS +from ..hparams import get_eval_args +from ..model import load_model, load_tokenizer +from .template import get_eval_template + + +if TYPE_CHECKING: + from numpy.typing import NDArray + + +class Evaluator: + def __init__(self, args: Optional[dict[str, Any]] = None) -> None: + self.model_args, self.data_args, self.eval_args, finetuning_args = get_eval_args(args) + self.tokenizer = load_tokenizer(self.model_args)["tokenizer"] + self.tokenizer.padding_side = "right" # avoid overflow issue in batched inference for llama2 + self.template = get_template_and_fix_tokenizer(self.tokenizer, self.data_args) + self.model = load_model(self.tokenizer, self.model_args, finetuning_args) + self.eval_template = get_eval_template(self.eval_args.lang) + self.choice_inputs = [self.tokenizer.encode(ch, add_special_tokens=False)[-1] for ch in CHOICES] + + @torch.inference_mode() + def batch_inference(self, batch_input: dict[str, "torch.Tensor"]) -> list[str]: + logits = self.model(**batch_input).logits + lengths = torch.sum(batch_input["attention_mask"], dim=-1) + word_probs = torch.stack([logits[i, lengths[i] - 1] for i in range(len(lengths))], dim=0) + choice_probs = torch.nn.functional.softmax(word_probs[:, self.choice_inputs], dim=-1).detach() + return [chr(ord("A") + offset.item()) for offset in torch.argmax(choice_probs, dim=-1)] + + def eval(self) -> None: + eval_task = self.eval_args.task.split("_")[0] + eval_split = self.eval_args.task.split("_")[1] + + mapping = cached_file( + path_or_repo_id=os.path.join(self.eval_args.task_dir, eval_task), + filename="mapping.json", + cache_dir=self.model_args.cache_dir, + token=self.model_args.hf_hub_token, + ) + + with open(mapping, encoding="utf-8") as f: + categorys: dict[str, dict[str, str]] = json.load(f) + + category_corrects = {subj: np.array([], dtype="bool") for subj in SUBJECTS} + pbar = tqdm(categorys.keys(), desc="Processing subjects", position=0) + results = {} + for subject in pbar: + dataset = load_dataset( + path=os.path.join(self.eval_args.task_dir, eval_task), + name=subject, + cache_dir=self.model_args.cache_dir, + download_mode=self.eval_args.download_mode, + token=self.model_args.hf_hub_token, + trust_remote_code=self.model_args.trust_remote_code, + ) + pbar.set_postfix_str(categorys[subject]["name"]) + inputs, outputs, labels = [], [], [] + for i in trange(len(dataset[eval_split]), desc="Formatting batches", position=1, leave=False): + support_set = ( + dataset["train"].shuffle().select(range(min(self.eval_args.n_shot, len(dataset["train"])))) + ) + messages = self.eval_template.format_example( + target_data=dataset[eval_split][i], + support_set=support_set, + subject_name=categorys[subject]["name"], + ) + + input_ids, _ = self.template.encode_oneturn(tokenizer=self.tokenizer, messages=messages) + inputs.append({"input_ids": input_ids, "attention_mask": [1] * len(input_ids)}) + labels.append(messages[-1]["content"]) + + for i in trange( + 0, len(inputs), self.eval_args.batch_size, desc="Predicting batches", position=1, leave=False + ): + batch_input = self.tokenizer.pad( + inputs[i : i + self.eval_args.batch_size], return_attention_mask=True, return_tensors="pt" + ).to(self.model.device) + preds = 
self.batch_inference(batch_input) + outputs += preds + + corrects = np.array(outputs) == np.array(labels) + category_name = categorys[subject]["category"] + category_corrects[category_name] = np.concatenate([category_corrects[category_name], corrects], axis=0) + category_corrects["Average"] = np.concatenate([category_corrects["Average"], corrects], axis=0) + results[subject] = {str(i): outputs[i] for i in range(len(outputs))} + + pbar.close() + self._save_results(category_corrects, results) + + def _save_results(self, category_corrects: dict[str, "NDArray"], results: dict[str, dict[int, str]]) -> None: + score_info = "\n".join( + [ + f"{category_name:>15}: {100 * np.mean(category_correct):.2f}" + for category_name, category_correct in category_corrects.items() + if len(category_correct) + ] + ) + print(score_info) + if self.eval_args.save_dir is not None: + os.makedirs(self.eval_args.save_dir, exist_ok=False) + with open(os.path.join(self.eval_args.save_dir, "results.json"), "w", encoding="utf-8", newline="\n") as f: + json.dump(results, f, indent=2) + + with open(os.path.join(self.eval_args.save_dir, "results.log"), "w", encoding="utf-8", newline="\n") as f: + f.write(score_info) + + +def run_eval() -> None: + Evaluator().eval() diff --git a/post-training/LLaMA-Factory/src/llamafactory/eval/template.py b/post-training/LLaMA-Factory/src/llamafactory/eval/template.py new file mode 100644 index 0000000..5742469 --- /dev/null +++ b/post-training/LLaMA-Factory/src/llamafactory/eval/template.py @@ -0,0 +1,79 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass + +from ..data import Role +from ..extras.constants import CHOICES + + +@dataclass +class EvalTemplate: + system: str + choice: str + answer: str + + def _parse_example(self, example: dict[str, str]) -> tuple[str, str]: + r"""Parse eval example. + + input: a dict with keys {"question", "A", "B", "C", "D", "answer"} + output: a tuple of (prompt, response). 
+ """ + candidates = [self.choice.format(choice=ch, content=example[ch]) for ch in CHOICES if ch in example] + return "".join([example["question"]] + candidates + [self.answer]), example["answer"] + + def format_example( + self, target_data: dict[str, str], support_set: list[dict[str, str]], subject_name: str + ) -> list[dict[str, str]]: + r"""Convert dataset examples to messages.""" + messages = [] + for k in range(len(support_set)): + prompt, response = self._parse_example(support_set[k]) + messages.append({"role": Role.USER.value, "content": prompt}) + messages.append({"role": Role.ASSISTANT.value, "content": response}) + + prompt, response = self._parse_example(target_data) + messages.append({"role": Role.USER.value, "content": prompt}) + messages.append({"role": Role.ASSISTANT.value, "content": response}) + messages[0]["content"] = self.system.format(subject=subject_name) + messages[0]["content"] + return messages + + +eval_templates: dict[str, "EvalTemplate"] = {} + + +def _register_eval_template(name: str, system: str, choice: str, answer: str) -> None: + eval_templates[name] = EvalTemplate(system=system, choice=choice, answer=answer) + + +def get_eval_template(name: str) -> "EvalTemplate": + eval_template = eval_templates.get(name, None) + assert eval_template is not None, f"Template {name} does not exist." + return eval_template + + +_register_eval_template( + name="en", + system="The following are multiple choice questions (with answers) about {subject}.\n\n", + choice="\n{choice}. {content}", + answer="\nAnswer:", +) + + +_register_eval_template( + name="zh", + system="以下是中国关于{subject}考试的单项选择题,请选出其中的正确答案。\n\n", + choice="\n{choice}. {content}", + answer="\n答案:", +) diff --git a/post-training/LLaMA-Factory/src/llamafactory/extras/__init__.py b/post-training/LLaMA-Factory/src/llamafactory/extras/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/post-training/LLaMA-Factory/src/llamafactory/extras/constants.py b/post-training/LLaMA-Factory/src/llamafactory/extras/constants.py new file mode 100644 index 0000000..025a8bb --- /dev/null +++ b/post-training/LLaMA-Factory/src/llamafactory/extras/constants.py @@ -0,0 +1,2918 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from collections import OrderedDict, defaultdict +from enum import Enum, unique +from typing import Optional + +from peft.utils import SAFETENSORS_WEIGHTS_NAME as SAFE_ADAPTER_WEIGHTS_NAME +from peft.utils import WEIGHTS_NAME as ADAPTER_WEIGHTS_NAME +from transformers.utils import SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME + + +AUDIO_PLACEHOLDER = os.getenv("AUDIO_PLACEHOLDER", "