commit 18c42e67df28bb6c7f5dc847595637327919e5ea Author: chenxl Date: Sat Jul 27 16:06:58 2024 +0800 Initial commit diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..718ea55 --- /dev/null +++ b/.gitignore @@ -0,0 +1,17 @@ +__pycache__ +build +.vscode +*.so +*.cache +server.db +logs +node_modules +*.nsys-rep +.vs/ +*pycache* +*build/ +*/third_party/* +.DS_Store +compile_commands.json +*.egg-info* +*dist/ \ No newline at end of file diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..68242f3 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,6 @@ +[submodule "third_party/llama.cpp"] + path = third_party/llama.cpp + url = https://github.com/ggerganov/llama.cpp.git +[submodule "third_party/pybind11"] + path = third_party/pybind11 + url = https://github.com/pybind/pybind11.git diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 0000000..1ed4ddb --- /dev/null +++ b/.pylintrc @@ -0,0 +1,6 @@ +[MASTER] +extension-pkg-whitelist=pydantic +max-line-length=120 + +[MESSAGES CONTROL] +disable=missing-function-docstring \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..dac9b32 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,11 @@ +graft third_party +graft ktransformers +graft local_chat.py +include LICENSE README.md +prune ktransformers/website +prune ktransformers/logs +prune ktransformers.egg-info +prune third_party/llama.cpp/models +graft ktransformers/website/dist +global-exclude __pycache__ +include KTransformersOps.*.so diff --git a/README.md b/README.md new file mode 100644 index 0000000..95a8c46 --- /dev/null +++ b/README.md @@ -0,0 +1,284 @@ +
+ +

+ + + DeepSeek-Coder-V2 Score + + +

+

A Flexible Framework for Experiencing Cutting-edge LLM Inference Optimizations

+ 🔥 Show Cases | 🚀 Quick Start | 📃 Tutorial | 💬 Discussion
+
+ + +

🎉 Introduction

+KTransformers, pronounced as Quick Transformers, is designed to enhance your 🤗 Transformers experience with advanced kernel optimizations and placement/parallelism strategies.
+

+KTransformers is a flexible, Python-centric framework designed with extensibility at its core. +By implementing and injecting an optimized module with a single line of code, users gain access to a Transformers-compatible +interface, RESTful APIs compliant with OpenAI and Ollama, and even a simplified ChatGPT-like web UI. +

+Our vision for KTransformers is to serve as a flexible platform for experimenting with innovative LLM inference optimizations. Please let us know if you need any other features. + + +

🔥 Show Cases

+

GPT-4-level Local VSCode Copilot on a Desktop with only 24GB VRAM

+

+ + https://github.com/user-attachments/assets/3f85780e-aa53-4d2f-91b2-5585c8dade85 + +

+
+- **Local 236B DeepSeek-Coder-V2:** Running its Q4_K_M version using only 21GB VRAM and 136GB DRAM, attainable on a local desktop machine; it scores even better than GPT-4-0613 in [BigCodeBench](https://huggingface.co/blog/leaderboard-bigcodebench).
+

+ + DeepSeek-Coder-V2 Score + +

+ +- **Faster Speed:** Achieving 126 tokens/s for 2K prompt prefill and 13.6 tokens/s for generation through MoE offloading and injecting advanced kernels from [Llamafile](https://github.com/Mozilla-Ocho/llamafile/tree/main) and [Marlin](https://github.com/IST-DASLab/marlin). +- **VSCode Integration:** Wrapped into an OpenAI and Ollama compatible API for seamless integration as a backend for [Tabby](https://github.com/TabbyML/tabby) and various other frontends. + +

+ + + https://github.com/user-attachments/assets/e6e27cb3-8372-44e6-8f1f-34402eae56c1 + +

+
+
+More advanced features are coming soon, so stay tuned!
+

🚀 Quick Start

+ +

Preparation

+Some preparation:
+
+- CUDA 12.1 or above; if you don't have it yet, you can install it from [here](https://developer.nvidia.com/cuda-downloads).
+
+
+- Linux-x86_64 with gcc, g++ and cmake
+  ```sh
+  sudo apt-get update
+  sudo apt-get install gcc g++ cmake ninja-build
+  ```
+- We recommend using [Conda](https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh) to create a virtual environment with Python=3.11 to run our program.
+  ```sh
+  conda create --name ktransformers python=3.11
+  conda activate ktransformers # you may need to run 'conda init' and reopen shell first
+  ```
+
+  Download source code:
+  ```sh
+  git clone https://github.com/kvcache-ai/ktransformers.git
+  cd ktransformers
+  git submodule init
+  git submodule update
+  ```
+

Local Chat

+We provide a simple command-line local chat Python script that you can run for testing.
+
+  > Note that this is a very simple test tool that only supports one-round chat, without any memory of the previous input; if you want to try the full ability of the model, you may go to [RESTful API and Web UI](#id_666). We use the DeepSeek-V2-Lite-Chat-GGUF model as an example here, but we also support other models; you can replace it with any other model that you want to test.
+

Install

+ +```sh +bash install.sh +``` + +

Run Example

+
+```shell
+# Begin from root of your cloned repo!
+# Begin from root of your cloned repo!!
+# Begin from root of your cloned repo!!!
+
+# Download mzwing/DeepSeek-V2-Lite-Chat-GGUF from huggingface
+mkdir DeepSeek-V2-Lite-Chat-GGUF
+cd DeepSeek-V2-Lite-Chat-GGUF
+
+wget https://huggingface.co/mzwing/DeepSeek-V2-Lite-Chat-GGUF/resolve/main/DeepSeek-V2-Lite-Chat.Q4_K_M.gguf -O DeepSeek-V2-Lite-Chat.Q4_K_M.gguf
+
+cd .. # Move to repo's root dir
+
+# Start local chat
+python ktransformers/local_chat.py --model_path deepseek-ai/DeepSeek-V2-Lite-Chat --gguf_path ./DeepSeek-V2-Lite-Chat-GGUF
+
+# If you see "OSError: We couldn't connect to 'https://huggingface.co' to load this file", try:
+# GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/deepseek-ai/DeepSeek-V2-Lite
+# python ktransformers/local_chat.py --model_path ./DeepSeek-V2-Lite --gguf_path ./DeepSeek-V2-Lite-Chat-GGUF
+```
+
+
+It features the following arguments:
+
+- `--model_path` (required): Name of the model (such as "deepseek-ai/DeepSeek-V2-Lite-Chat", which will automatically download configs from [Hugging Face](https://huggingface.co/deepseek-ai/DeepSeek-V2-Lite)); or, if you already have local files, you may use that path directly to initialize the model.
+  > Note: .safetensors files are not required in the directory. We only need the config files to build the model and tokenizer.
+- `--gguf_path` (required): Path of a directory containing GGUF files, which can be downloaded from [Hugging Face](https://huggingface.co/mzwing/DeepSeek-V2-Lite-Chat-GGUF/tree/main) (we only support q4_k_m and q8_0 for now; more formats are coming soon).
+- `--optimize_rule_path` (required except for Qwen2Moe and DeepSeek-V2): Path of the YAML file containing optimize rules. There are two rule files pre-written in the [ktransformers/optimize/optimize_rules](ktransformers/optimize/optimize_rules) directory for optimizing DeepSeek-V2 and Qwen2-57B-A14, two SOTA MoE models.
+- `--max_new_tokens`: Int (default=1000). Maximum number of new tokens to generate.
+- `--cpu_infer`: Int (default=10). The number of CPUs used for inference. Should ideally be set to the total number of cores minus 2.
+
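+For reference, a single invocation combining the optional flags might look like the following; the rule-file name here is illustrative, so substitute whichever YAML under `ktransformers/optimize/optimize_rules` matches your model:
+
+```sh
+# hypothetical example combining the optional flags above;
+# adjust the paths and the rule file to your setup
+python ktransformers/local_chat.py \
+  --model_path deepseek-ai/DeepSeek-V2-Lite-Chat \
+  --gguf_path ./DeepSeek-V2-Lite-Chat-GGUF \
+  --optimize_rule_path ktransformers/optimize/optimize_rules/DeepSeek-V2-Chat.yaml \
+  --max_new_tokens 500 \
+  --cpu_infer 30
+```
+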

Supported Models

+
+| Model Name | Model Size | VRAM | Minimum DRAM | Recommended DRAM |
+| ---- | ---- | ---- | ---- | ---- |
+| DeepSeek-V2-q4_k_m | 133G | 24G | 136G | 192G |
+| Qwen2-57B-A14B-Instruct-q4_k_m | 33G | 8G | 34G | 64G |
+| DeepSeek-V2-Lite-q4_k_m | 9.7G | 3G | 13G | 16G |
+
+
+More will come soon. Please let us know which models you are most interested in.
+
+Be aware that you are subject to the corresponding model licenses when using [DeepSeek](https://huggingface.co/deepseek-ai/DeepSeek-V2/blob/main/LICENSE) and [QWen](https://huggingface.co/Qwen/Qwen2-72B-Instruct/blob/main/LICENSE) models.
+
+ Click To Show how to run other examples
+
+
+* Qwen2-57B
+
+```sh
+pip install flash_attn # For Qwen2
+
+mkdir Qwen2-57B-GGUF && cd Qwen2-57B-GGUF
+
+wget https://huggingface.co/Qwen/Qwen2-57B-A14B-Instruct-GGUF/resolve/main/qwen2-57b-a14b-instruct-q4_k_m.gguf?download=true -O qwen2-57b-a14b-instruct-q4_k_m.gguf
+
+cd ..
+
+python ktransformers/local_chat.py --model_path Qwen/Qwen2-57B-A14B-Instruct --gguf_path ./Qwen2-57B-GGUF
+
+# If you see "OSError: We couldn't connect to 'https://huggingface.co' to load this file", try:
+# GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/Qwen/Qwen2-57B-A14B-Instruct
+# python ktransformers/local_chat.py --model_path ./Qwen2-57B-A14B-Instruct --gguf_path ./Qwen2-57B-GGUF
+```
+
+* DeepseekV2
+```sh
+mkdir DeepSeek-V2-Chat-0628-GGUF && cd DeepSeek-V2-Chat-0628-GGUF
+# Download weights
+wget https://huggingface.co/bartowski/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628-Q4_K_M/DeepSeek-V2-Chat-0628-Q4_K_M-00001-of-00004.gguf -O DeepSeek-V2-Chat-0628-Q4_K_M-00001-of-00004.gguf
+wget https://huggingface.co/bartowski/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628-Q4_K_M/DeepSeek-V2-Chat-0628-Q4_K_M-00002-of-00004.gguf -O DeepSeek-V2-Chat-0628-Q4_K_M-00002-of-00004.gguf
+wget https://huggingface.co/bartowski/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628-Q4_K_M/DeepSeek-V2-Chat-0628-Q4_K_M-00003-of-00004.gguf -O DeepSeek-V2-Chat-0628-Q4_K_M-00003-of-00004.gguf
+wget https://huggingface.co/bartowski/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628-Q4_K_M/DeepSeek-V2-Chat-0628-Q4_K_M-00004-of-00004.gguf -O DeepSeek-V2-Chat-0628-Q4_K_M-00004-of-00004.gguf
+
+cd ..
+
+python ktransformers/local_chat.py --model_path deepseek-ai/DeepSeek-V2-Chat-0628 --gguf_path ./DeepSeek-V2-Chat-0628-GGUF
+
+# If you see "OSError: We couldn't connect to 'https://huggingface.co' to load this file", try:
+# GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/deepseek-ai/DeepSeek-V2-Chat-0628
+# python ktransformers/local_chat.py --model_path ./DeepSeek-V2-Chat-0628 --gguf_path ./DeepSeek-V2-Chat-0628-GGUF
+```
+
+| model name | weights download link |
+|----------|----------|
+| Qwen2-57B | [Qwen2-57B-A14B-gguf-Q4K-M](https://huggingface.co/Qwen/Qwen2-57B-A14B-Instruct-GGUF/tree/main) |
+| DeepseekV2-coder | [DeepSeek-Coder-V2-Instruct-gguf-Q4K-M](https://huggingface.co/LoneStriker/DeepSeek-Coder-V2-Instruct-GGUF/tree/main) |
+| DeepseekV2-chat | [DeepSeek-V2-Chat-gguf-Q4K-M](https://huggingface.co/bullerwins/DeepSeek-V2-Chat-0628-GGUF/tree/main) |
+| DeepseekV2-lite | [DeepSeek-V2-Lite-Chat-GGUF-Q4K-M](https://huggingface.co/mzwing/DeepSeek-V2-Lite-Chat-GGUF/tree/main) |
+
+ + + + +

RESTful API and Web UI

+ +

Install

+
+[Optional] If you want to run with the website, please [compile the website](./doc/en/api/server/website.md) before executing ```pip install .```
+
+Install ktransformers from source:
+```
+pip install -r requirements-local_chat.txt
+pip install . --no-build-isolation
+```
+
+Start without the website:
+
+```sh
+ktransformers --model_path deepseek-ai/DeepSeek-V2-Lite-Chat --gguf_path /path/to/DeepSeek-V2-Lite-Chat-GGUF --port 10002
+```
+Start with the website:
+```sh
+ktransformers --model_path deepseek-ai/DeepSeek-V2-Lite-Chat --gguf_path /path/to/DeepSeek-V2-Lite-Chat-GGUF --port 10002 --web True
+```
+Or, if you want to start the server with transformers, the model_path should contain safetensors files:
+```bash
+ktransformers --type transformers --model_path /mnt/data/model/Qwen2-0.5B-Instruct --port 10002 --web True
+```
+
+Access the website at [http://localhost:10002/web/index.html#/chat](http://localhost:10002/web/index.html#/chat):
+

+ + Web UI + +

+ +More information about the RESTful API server can be found [here](doc/en/api/server/server.md). You can also find an example of integrating with Tabby [here](doc/en/api/server/tabby.md). + + +
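+Once the server is running, a quick sanity check against the OpenAI-compatible endpoint looks like this (the port and model name are illustrative and should match your launch command):
+
+```bash
+curl -X 'POST' 'http://localhost:10002/v1/chat/completions' \
+  -H 'Content-Type: application/json' \
+  -d '{
+  "messages": [{"role": "user", "content": "hello"}],
+  "model": "DeepSeek-V2-Lite-Chat",
+  "stream": true
+}'
+```
+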

📃 Brief Injection Tutorial

+At the heart of KTransformers is a user-friendly, template-based injection framework. +This allows researchers to easily replace original torch modules with optimized variants. It also simplifies the process of combining multiple optimizations, allowing the exploration of their synergistic effects. + +
+

+ + Inject-Struction + +

+ +Given that vLLM already serves as a great framework for large-scale deployment optimizations, KTransformers is particularly focused on local deployments that are constrained by limited resources. We pay special attention to heterogeneous computing opportunities, such as GPU/CPU offloading of quantized models. For example, we support the efficient Llamafile and Marlin kernels for CPU and GPU, respectively. More details can be found here. + +

Example Usage

+To utilize the provided kernels, users only need to create a YAML-based injection template and add the call to `optimize_and_load_gguf` before using the Transformers model. + +```python +with torch.device("meta"): + model = AutoModelForCausalLM.from_config(config, trust_remote_code=True) +optimize_and_load_gguf(model, optimize_rule_path, gguf_path, config) +... +generated = prefill_and_generate(model, tokenizer, input_tensor.cuda(), max_new_tokens=1000) +``` + +In this example, the AutoModel is first initialized on the meta device to avoid occupying any memory resources. Then, `optimize_and_load_gguf` iterates through all sub-modules of the model, matches rules specified in your YAML rule file, and replaces them with advanced modules as specified. + +After injection, the original `generate` interface is available, but we also provide a compatible `prefill_and_generate` method, which enables further optimizations like CUDAGraph to improve generation speed. + +
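+For context, here is a fuller, self-contained sketch of the same flow. The import paths and file locations below are assumptions for illustration; `local_chat.py` in this repo is the authoritative reference.
+
+```python
+import torch
+from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
+# Assumed import locations; check this repo's sources for the exact paths.
+from ktransformers.optimize.optimize import optimize_and_load_gguf
+from ktransformers.util.utils import prefill_and_generate
+
+model_path = "deepseek-ai/DeepSeek-V2-Lite-Chat"  # illustrative model name
+config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+
+with torch.device("meta"):
+    # Build the module skeleton on the meta device so no weights are materialized.
+    model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)
+
+# Replace matched sub-modules per the YAML rules, then load the GGUF weights.
+optimize_and_load_gguf(model, "path/to/optimize_rule.yaml", "path/to/gguf_dir", config)
+
+input_tensor = tokenizer("Hello, world!", return_tensors="pt").input_ids
+generated = prefill_and_generate(model, tokenizer, input_tensor.cuda(), max_new_tokens=1000)
+```
+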

YAML Template

+Below is an example of a YAML template for replacing all original Linear modules with Marlin, an advanced 4-bit quantization kernel. + +```yaml +- match: + name: "^model\\.layers\\..*$" # regular expression + class: torch.nn.Linear # only match modules matching name and class simultaneously + replace: + class: ktransformers.operators.linear.KTransformerLinear # optimized Kernel on quantized data types + device: "cpu" # which devices to load this module when initializing + kwargs: + generate_device: "cuda" + generate_linear_type: "QuantizedLinearMarlin" +``` + +Each rule in the YAML file has two parts: `match` and `replace`. The `match` part specifies which module should be replaced, and the `replace` part specifies the module to be injected into the model along with the initialization keywords. + +You can find example rule templates for optimizing DeepSeek-V2 and Qwen2-57B-A14, two SOTA MoE models, in the [ktransformers/optimize/optimize_rules](ktransformers/optimize/optimize_rules) directory. These templates are used to power the `local_chat.py` demo. + +A detailed description of the injection using DeepSeek-V2 as an example is given [here](doc/en/deepseek-v2-injection.md). + +
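+To make the match/replace semantics above concrete, here is a deliberately simplified sketch of how such a rule could be applied by hand. This only illustrates the idea and is not KTransformers' actual implementation:
+
+```python
+# Simplified illustration of name+class matching and module swapping
+# (not KTransformers' actual code).
+import re
+import torch.nn as nn
+
+def inject(model: nn.Module, name_pattern: str, match_class: type, build_replacement):
+    """Replace every sub-module whose qualified name matches the regex AND whose
+    class matches match_class, mirroring the match/replace parts of a rule."""
+    for name, module in list(model.named_modules()):
+        if re.match(name_pattern, name) and isinstance(module, match_class):
+            parent_name, _, child_name = name.rpartition(".")
+            parent = model.get_submodule(parent_name) if parent_name else model
+            setattr(parent, child_name, build_replacement(module))
+
+# e.g., swap every Linear under model.layers for a (hypothetical) optimized kernel:
+# inject(model, r"^model\.layers\..*$", nn.Linear, lambda m: MyQuantLinear(m))
+```
+
+In the real framework, the YAML file supplies the name pattern, the class to match, the replacement class, and its initialization kwargs, and the replacement modules also receive their weights from the GGUF file during `optimize_and_load_gguf`.
+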

Acknowledgment and Contributors

+
+The development of KTransformers is based on the flexible and versatile framework provided by Transformers. We also benefit from advanced kernels such as GGUF/GGML, Llamafile, and Marlin. We are planning to contribute back to the community by upstreaming our modifications.
+
+KTransformers is actively maintained and developed by contributors from the MADSys group at Tsinghua University and members from Approaching.AI. We welcome new contributors to join us in making KTransformers faster and easier to use.
diff --git a/doc/assets/BigCodeBench.png b/doc/assets/BigCodeBench.png
new file mode 100644
index 0000000..474c23f
Binary files /dev/null and b/doc/assets/BigCodeBench.png differ
diff --git a/doc/assets/DeepSeek-on-KTransformers.PNG b/doc/assets/DeepSeek-on-KTransformers.PNG
new file mode 100644
index 0000000..455f210
Binary files /dev/null and b/doc/assets/DeepSeek-on-KTransformers.PNG differ
diff --git a/doc/assets/InjectStruction.png b/doc/assets/InjectStruction.png
new file mode 100644
index 0000000..40ce6ce
Binary files /dev/null and b/doc/assets/InjectStruction.png differ
diff --git a/doc/assets/KTransformers.png b/doc/assets/KTransformers.png
new file mode 100644
index 0000000..a7d3673
Binary files /dev/null and b/doc/assets/KTransformers.png differ
diff --git a/doc/assets/cpuinfer.png b/doc/assets/cpuinfer.png
new file mode 100644
index 0000000..e1d7a51
Binary files /dev/null and b/doc/assets/cpuinfer.png differ
diff --git a/doc/assets/website.png b/doc/assets/website.png
new file mode 100644
index 0000000..450462a
Binary files /dev/null and b/doc/assets/website.png differ
diff --git a/doc/en/api/server/api.md b/doc/en/api/server/api.md
new file mode 100644
index 0000000..81eec43
--- /dev/null
+++ b/doc/en/api/server/api.md
@@ -0,0 +1,108 @@
+# API
+
+- [OpenAI ChatCompletion](#openai-chatcompletion)
+- [Ollama ChatCompletion](#ollama-chatcompletion)
+- [OpenAI Assistant](#openai-assistant)
+
+## OpenAI ChatCompletion
+```bash
+POST /v1/chat/completions
+
+```
+Generate responses based on the selected model.
+
+### Parameters
+- `messages`: An array of `message` representing all historical messages. A `message` can be from a user or model (assistant) and includes:
+
+  - `role`: Either `user` or `assistant`, indicating the creator of this message.
+  - `content`: The message from the user or model.
+- `model`: The name of the selected model.
+- `stream`: Either true or false. Indicates whether to use streaming responses. If true, model inference results are returned via HTTP event stream.
+
+### Response
+- Streaming response: An event stream, each event contains a `chat.completion.chunk`. `chunk.choices[0].delta.content` is the incremental output returned by the model each time.
+- Non-streaming response: Not supported yet.
+
+
+### Example
+
+```bash
+curl -X 'POST' \
+  'http://localhost:9112/v1/chat/completions' \
+  -H 'accept: application/json' \
+  -H 'Content-Type: application/json' \
+  -d '{
+  "messages": [
+    {
+      "content": "tell a joke",
+      "role": "user"
+    }
+  ],
+  "model": "Meta-Llama-3-8B-Instruct",
+  "stream": true
+}'
+```
+
+```bash
+data:{"id":"c30445e8-1061-4149-a101-39b8222e79e1","object":"chat.completion.chunk","created":1720511671,"model":"not implmented","system_fingerprint":"not implmented","usage":null,"choices":[{"index":0,"delta":{"content":"Why ","role":"assistant","name":null},"logprobs":null,"finish_reason":null}]}
+
+data:{"id":"c30445e8-1061-4149-a101-39b8222e79e1","object":"chat.completion.chunk","created":1720511671,"model":"not implmented","system_fingerprint":"not implmented","usage":null,"choices":[{"index":0,"delta":{"content":"","role":"assistant","name":null},"logprobs":null,"finish_reason":null}]}
+
+data:{"id":"c30445e8-1061-4149-a101-39b8222e79e1","object":"chat.completion.chunk","created":1720511671,"model":"not implmented","system_fingerprint":"not implmented","usage":null,"choices":[{"index":0,"delta":{"content":"couldn't ","role":"assistant","name":null},"logprobs":null,"finish_reason":null}]}
+
+...
+
+data:{"id":"c30445e8-1061-4149-a101-39b8222e79e1","object":"chat.completion.chunk","created":1720511671,"model":"not implmented","system_fingerprint":"not implmented","usage":null,"choices":[{"index":0,"delta":{"content":"two-tired!","role":"assistant","name":null},"logprobs":null,"finish_reason":null}]}
+
+event: done
+data: [DONE]
+```
+
+
+
+## Ollama ChatCompletion
+
+```bash
+POST /api/generate
+```
+
+Generate responses using the selected model.
+
+### Parameters
+- `prompt`: A string representing the input prompt.
+- `model`: The name of the selected model.
+- `stream`: Either true or false. Indicates whether to use streaming responses. If true, returns the model inference results in the form of an HTTP event stream.
+
+### Response
+- Streaming response: A stream of JSON responses, each line is a JSON.
+  - `response`: The incremental result of the model completion.
+  - `done`: Whether the inference has finished.
+- Non-streaming response: Not yet supported.
+
+### Example
+
+```bash
+curl -X 'POST' \
+  'http://localhost:9112/api/generate' \
+  -H 'accept: application/json' \
+  -H 'Content-Type: application/json' \
+  -d '{
+  "model": "Meta-Llama-3-8B-Instruct",
+  "prompt": "tell me a joke",
+  "stream": true
+}'
+```
+
+```bash
+{"model":"Meta-Llama-3-8B-Instruct","created_at":"2024-07-09 08:13:11.686513","response":"I'll ","done":false}
+{"model":"Meta-Llama-3-8B-Instruct","created_at":"2024-07-09 08:13:11.729214","response":"give ","done":false}
+
+...
+ +{"model":"Meta-Llama-3-8B-Instruct","created_at":"2024-07-09 08:13:33.955475","response":"for","done":false} +{"model":"Meta-Llama-3-8B-Instruct","created_at":"2024-07-09 08:13:33.956795","response":"","done":true} +``` + + + diff --git a/doc/en/api/server/run-tabby.png b/doc/en/api/server/run-tabby.png new file mode 100644 index 0000000..57d962d Binary files /dev/null and b/doc/en/api/server/run-tabby.png differ diff --git a/doc/en/api/server/server-arch.png b/doc/en/api/server/server-arch.png new file mode 100644 index 0000000..093b914 Binary files /dev/null and b/doc/en/api/server/server-arch.png differ diff --git a/doc/en/api/server/server.md b/doc/en/api/server/server.md new file mode 100644 index 0000000..5541b9a --- /dev/null +++ b/doc/en/api/server/server.md @@ -0,0 +1,37 @@ +# Backend Services (Server) +The Server offers fast heterogeneous inference capabilities of ktransformers through an API for external usage. + +Server architecture + +## API + +The Server provides model inference services externally through a RESTful API, with two methods of interaction: ChatCompletion and Assistant. + +- The ChatCompletion interface requires users to provide all historical dialogues at once, after which the model responds. AI service providers (such as [OpenAI](https://platform.openai.com/docs/api-reference/chat/create)) and local inference frameworks (such as [Ollama](https://github.com/ollama/ollama/blob/main/docs/api.md)) both offer the ChatCompletion interface. To ensure compatibility with OpenAI and Ollama, the Server offers APIs that are consistent with theirs. Therefore, applications currently using OpenAI and Ollama can seamlessly switch to our Server. For example: [How to use Tabby and ktransformers locally with a 236B model for code completion?](tabby.md). +- The Assistant is suitable for applications that need to reuse a series of resources and call the model. For instance, in educational applications, developers can create an Assistant named "Second Grade Math Teacher" and set an initial prompt ("You are an experienced second-grade math teacher..."), and upload relevant materials (second grade math textbooks). After creating the Assistant, the application needs to create a Thread to store the dialogues between the user and the model (Message). When calling the model, the application creates a Run to obtain the Assistant's response. Compared to ChatCompletion, the Assistant-enabled Server handles the reuse of conversational contexts and multi-turn dialogues, making model calls in complex scenarios more convenient. The [OpenAI Assistant API](https://platform.openai.com/docs/api-reference/assistants/createAssistant) introduces such an Assistant interface, and the Server provides a consistent API. + +These API definitions are located in `server/api`, and their specific usage can be seen [here](api.md). + +## Integrating Model Inference Frameworks + +The Server uses ktransformers for model calling and inference. It also supports other inference frameworks, such as the already supported [transformers](https://huggingface.co/docs/transformers/index), and plans to support [exllamav2](https://github.com/turboderp/exllamav2). These functionalities are implemented in `server/backend`. + +The model inference functionalities of the frameworks are abstracted into a base class `BackendInterfaceBase`. This class includes a function: inference. It takes historical dialogue information messages as input and returns the text result from the model. 
The inference function adopts an async generator design, allowing the Server to return model responses in a streaming manner.
+
+```python
+class BackendInterfaceBase:
+    async def inference(self, messages, **kwargs) -> AsyncIterator[str]:
+        ...
+```
+
+This inference function naturally implements the functionality of ChatCompletion, because its inputs and outputs are historical dialogues and model responses, respectively. Thus, the ChatCompletion API can directly call the inference function to complete model inference.
+
+Assistant is more complex than ChatCompletion, requiring the Server to store the related state of the Assistant and call the inference function appropriately. The Server maintains a set of Assistant logic in the database, storing the Assistants, Threads, and Messages created by applications. In memory, the Server maintains a `ThreadContext` for each Thread, gathering the information related to each Thread's Assistant, etc. When a user sends a new Message, the Server calls the `get_local_messages` function of the ThreadContext to obtain messages and then calls the inference function to get the inference results.
+
+```python
+class MyThreadContext(ThreadContext):
+    def get_local_messages(self):
+        ...
+```
+
+Since different model inference frameworks have different historical dialogue input formats, `ThreadContext` and `BackendInterface` need to be used in pairs. Besides ktransformers itself, the Server also supports transformers; for integrating other model inference frameworks, refer to the implementations of `TransformersInterface` and `TransformersThreadContext` in [transformers.py](https://github.com/kvcache-ai/ktransformers-dev/blob/main/ktransformers/server/backend/interfaces/transformers.py).
\ No newline at end of file
diff --git a/doc/en/api/server/tabby.md b/doc/en/api/server/tabby.md
new file mode 100644
index 0000000..91b554f
--- /dev/null
+++ b/doc/en/api/server/tabby.md
@@ -0,0 +1,33 @@
+# How to Use Tabby and ktransformers Locally with 236B Large Models for Code Completion?
+
+[Tabby](https://tabby.tabbyml.com/docs/welcome/) is an open-source code assistant that allows users to manually configure the backend framework and model, and use it across multiple IDEs/editors, such as VSCode and IntelliJ. Since Tabby can interface with Ollama on the framework side, and the ktransformers server provides an API consistent with Ollama's, we can connect Tabby to the ktransformers server. This setup allows us to experience fast, heterogeneous inference in code completion scenarios.
+
+1. Start ktransformers.
+```bash
+./ktransformers --port 9112
+```
+2. Install Tabby: Follow the official tutorial to install Tabby on a Linux server or Windows PC with an NVIDIA GPU [here](https://tabby.tabbyml.com/docs/quick-start/installation/linux/).
+3. Configure Tabby: Create `~/.tabby/config.toml` and add the following configuration.
+```toml
+[model.completion.http]
+kind = "ollama/completion"
+api_endpoint = "http://127.0.0.1:9112/"
+model_name = "DeepSeek-Coder-V2-Instruct"
+prompt_template = "<｜fim▁begin｜>{prefix}<｜fim▁hole｜>{suffix}<｜fim▁end｜>" # Prompt Template
+```
+
+In this configuration, `kind` specifies that ktransformers uses the standard Ollama API to serve Tabby; `api_endpoint` matches the interface bound when launching ktransformers; `model_name` is set to the model used by ktransformers, here `DeepSeek-Coder-V2-Instruct` is the backend inference model; `prompt_template` is the model's prompt template, which requires a corresponding template for each model in order to use the Fill In the Middle feature properly.
+Here we demonstrate the relevant configuration for Tabby using the Ollama API to provide the Completion feature. For configuration information about other functions available in Tabby, refer to [here](https://tabby.tabbyml.com/docs/administration/model/).
+
+
+4. Start the Tabby service: `./tabby serve`.
+image-20240709112329577
+
+   After launching, you should see access to the `/api/tags` interface in the ktransformers command line (in version v0.13.0 of Tabby, this changes to access to the `/api/show/` interface).
+image-20240709111648215
+
+5. Register a Tabby account and obtain a Token: After starting the Tabby service, open the corresponding link in a browser (as shown above at 0.0.0.0:8080), and follow the [tutorial](https://tabby.tabbyml.com/docs/quick-start/register-account/) to create a user and get a Token.
+
+6. Start VSCode, install the Tabby extension plugin, and use the Token obtained in the previous step to connect to the Tabby Server, following [here](https://tabby.tabbyml.com/docs/extensions/installation/vscode/).
+
+7. Open any code file and experience the fast heterogeneous inference of ktransformers.
\ No newline at end of file
diff --git a/doc/en/api/server/visit-api-tags.png b/doc/en/api/server/visit-api-tags.png
new file mode 100644
index 0000000..d763dd2
Binary files /dev/null and b/doc/en/api/server/visit-api-tags.png differ
diff --git a/doc/en/api/server/website.md b/doc/en/api/server/website.md
new file mode 100644
index 0000000..bd380cd
--- /dev/null
+++ b/doc/en/api/server/website.md
@@ -0,0 +1,32 @@
+# Start with website
+
+This document provides the necessary steps to set up and run the web service for this project.
+
+## 1. Starting the Web Service
+
+### 1.1. Compiling the Web Code
+
+Before you can compile the web code, make sure you have installed [Node.js](https://nodejs.org) version 18.3 or higher.
+
+Once Node.js is installed, navigate to the `ktransformers/website` directory:
+
+```bash
+cd ktransformers/website
+```
+
+Next, install the Vue CLI with the following command:
+
+```bash
+npm install @vue/cli
+```
+
+Now you can build the project:
+
+```bash
+npm run build
+```
+Finally, you can build ktransformers with the website:
+```
+cd ../../
+pip install .
+```
diff --git a/doc/en/deepseek-v2-injection.md b/doc/en/deepseek-v2-injection.md
new file mode 100644
index 0000000..43359cf
--- /dev/null
+++ b/doc/en/deepseek-v2-injection.md
@@ -0,0 +1,166 @@
+# Tutorial: Heterogeneous and Local DeepSeek-V2 Inference
+
+DeepSeek-(Code)-V2 is a series of strong mixture-of-experts (MoE) models, featuring a total of 236 billion parameters, with 21 billion parameters activated per token.
This model has demonstrated remarkable reasoning capabilities across various benchmarks, positioning it as one of the SOTA open models and nearly comparable in performance to GPT-4. + +

+ + DeepSeek-Coder-V2 Score + +

+
+Moreover, unlike previous models that employed traditional attention mechanisms like Grouped-Query Attention (GQA), DeepSeek-V2 incorporates a novel Multi-head Latent Attention (MLA). This innovation significantly reduces the size of the KV cache required during inference, enhancing efficiency.
+
+
+However, despite its efficiency, running such a large model on a personal computing setup seems impractical. Official documentation for DeepSeek-V2 indicates that eight 80GB GPUs are necessary for standard inference operations, and even the scaled-down Q4_k_m version requires at least two 80GB GPUs. These requirements are beyond the reach of most individual researchers and small teams.
+
+
+Nonetheless, by employing several cutting-edge optimization techniques, we have successfully operated this colossal model on a desktop computer with only 21GB of VRAM and 136GB of DRAM. In this document, we outline the specific optimizations utilized and provide a detailed tutorial on how to implement these strategies using KTransformers.
+
+## Applied Optimizations
+
+### Optimized MLA Operator
+
+The following figure provides a brief overview of the DeepSeek-V2 architecture. At the heart of its attention layer, DeepSeek-V2 introduces a novel MLA operator that represents the heads of key-value pairs using a common, joint compressed representation, which holds significant potential for efficiency improvements. However, the official open-source implementation of the MLA operator explicitly decompresses this compressed representation and caches the decompressed key-value pairs. This process not only enlarges the KV cache size but also diminishes inference performance.
+

+ + DeepSeek on KTransformers + +

+
+To truly capitalize on the benefits of MLA, we have implemented an optimized version for inference. Following its original paper, we absorb the decompression matrices directly into the q_proj and out_proj weights. Consequently, the compressed representation does not need to be decompressed to compute the attention. This adjustment significantly reduces the KV cache size and increases the arithmetic intensity of this operator, which greatly optimizes the utilization of GPU computational power.
+
+### Advanced Quantization Kernels
+
+The original DeepSeek-V2 model stores its parameters in BF16 format, consuming approximately 470GB of raw storage. This exceeds the RAM capacity available on mainstream desktop computers. To address this, we leverage the well-established GGUF community's quantized weights to simplify the process for users.
+However, quantized data types are not typically supported by highly-optimized BLAS packages. As a result, the original HuggingFace Transformers' Torch implementation must dequantize these tensors to supported data types before processing, which introduces unnecessary computational overhead and increases memory traffic. To overcome this, we have incorporated advanced kernels that operate directly on quantized data types, thereby optimizing inference performance.
+
+
+In the current version of KTransformers, we utilize Marlin for GPU kernels and llamafile for CPU kernels. These kernels are specially designed to benefit from modern GPU architectures and modern CPU instruction extensions such as AVX512-BF16 (AMD Zen4 or newer) and AVX-VNNI (Intel Alder Lake or newer), which are tailored for quantized data types and machine learning workloads. We also use expert parallelism and other optimizations for MoE inference on the CPU based on llamafile, and refer to these kernels collectively as CPUInfer. As demonstrated in Figure 2 (cited from Marlin), Marlin can achieve a near-ideal 3.87x speedup compared to the corresponding Torch counterparts. As demonstrated in the following figure, our micro-benchmarks show that inference using CPUInfer is several times faster than Torch in low-bit representations. Note that in practical inference, such as when using transformers, the Torch baseline uses BF16 or FP16 linear weights and will occupy more memory resources, or it will be slower due to dequantization when using quantized weights.
+

+ + CPUInfer Performance + +

+ + +### Arithmetic Intensity Guided Offloading + +Storing all 236 billion parameters of a model in GPU VRAM is clearly impractical for local users. Therefore, we strategically store only the most computationally intensive parameters on the GPU. For instance, after our optimizations, the MLA operator, which contains 128 heads with a shared compressed key-value representation, shows an arithmetic intensity of 512. This makes it the most intensive operator, particularly during smaller inference batch sizes. Hence, it is allocated to the GPU to leverage the power of tensor cores. + + +On the other hand, as shown in Figure 1, each transformer block in DeepSeek-V2 includes 160 mixture-of-experts (MoE) experts, comprising 96% of the total parameters. However, the MoE router activates only 6 out of these 160 experts for each token, which means that only 3.75% of the MoE parameters are utilized during the decoding phase. With a batch size of one, the arithmetic intensity of the MoE operation is roughly 0.075. This operation, primarily involving a batched General Matrix-Vector Multiplication (GEMV), can thus be efficiently handled by the CPU. + + +Following this principle of arranging all operators by their arithmetic intensity and placing the most intensive ones in the GPU as much as possible, we prioritize positioning the MoE parameters and word embeddings computations on the CPU side to utilize its larger memory capacity. Meanwhile, the remaining parameters, including shared experts, projections in the attention module, and MLA, are stored in the GPU VRAM. As these parameters are accessed by every token, their placement on the GPU maximizes the benefits of high memory bandwidth. This configuration leads to approximately 20.7 GB of VRAM usage and 136GB DRAM memory requests if the Q4_K_M version is used, which is feasible even on a local desktop. Additionally, the placement can be adjusted according to the actual configuration, adhering to the same principle. + + +Moreover, as an extensible framework, KTransformers is set to support more advanced operators in future releases, continually enhancing its capability to handle diverse workloads efficiently. + +## YAML Template + +To implement the above optimizations in KTransformers, users need to write a YAML file containing the optimized rules. +KTransformers will iterate through all sub-modules of the model, match rules specified in the YAML rule file, and replace them with advanced modules as specified. + +

+ + Inject-Struction + +

+ +Specifically, the following rules are used: + +- Replace the Attention module with our [optimized MLA Operator](#mla). +- Replace routed experts with [CPUInfer kernels](#experts) that use Llamafile. +- Replace all Linear modules not belonging to attention with [Marlin](#linear) kernels. + + + +

MLA

+ +For attention module injection, we only need to match the module name used in Transformers using a regular expression and replace it with our pre-implemented module. +The YAML rule is listed below. + +```yaml +- match: + name: "^model\\.layers\\..*\\.self_attn$" # regular expression + replace: + class: ktransformers.operators.attention.DeepseekV2AttentionInjected # optimized MLA implementation +``` + +As we can see, each rule in the YAML file has two parts: `match` and `replace`. +The match part specifies which module should be replaced, and the replace part specifies the module to be injected into the model along with the initialization keywords. + +

Routed Experts

+
+For routed experts, the module we inject is a CPUInfer wrapper, KTransformersMLPExpert. There are several implementations within the wrapper, and we need to specify keywords to tell the wrapper which implementation we want to use and how we intend to use it.
+
+In KTransformers, some modules exhibit different behaviors during prefilling and generation for better performance. KTransformersMLPExpert is one of them. All these special modules have a `device` keyword describing which device the module should be initialized on. Other keywords specify the behaviors during prefilling and generation and may differ between injection modules. Here, we specify which implementation we want to use on which device during prefilling and generation, and which device the output should be on.
+Note that we only use these parameters when layer-wise prefilling is enabled; otherwise, prefilling is conducted with the same configuration as generation.
+
+In the original implementation of Transformers, MoE is implemented using `nn.ModuleList`. We don't want KTransformers to iterate through all the sub-modules in the list, so we set `recursive: False` in this rule to prevent recursive injection into submodules of the current module. Here is the YAML rule:
+
+```yaml
+- match:
+    name: "^model\\.layers\\..*\\.mlp\\.experts$"
+  replace:
+    class: ktransformers.operators.experts.KTransformersMLPExpert # custom MoE Kernel with expert parallelism
+    device: "cpu" # device to load this module on initialization
+    kwargs:
+      prefill_device: "cuda"
+      prefill_mlp_type: "MLPExpertsTorch"
+      generate_device: "cpu"
+      generate_mlp_type: "MLPCPUExperts"
+      out_device: "cuda"
+  recursive: False # don't recursively inject submodules of this module
+```
+
+If we inject the expert list as a custom module, we can't use the default interface of `nn.ModuleList`. We need to change the forward function in the FFN module. The simplest way is to implement a new module with a custom forward function and inject it. We have implemented the new module, and the injection can be done by simply adding an injection rule. We can use `class` instead of `name` to match the module that will be replaced. Here is the YAML rule:
+
+```yaml
+- match:
+    class: ktransformers.models.modeling_deepseek.DeepseekV2MoE
+  replace:
+    class: ktransformers.operators.experts.DeepseekV2MoEInjected # MLP module with custom forward function
+```
+

Other Linear Modules

+
+For the remaining linear modules, we want to use our quantization kernels. However, we don't want to inject linear layers inside the MLA operator, because we currently don't know the effect of using quantization in MLA.
+So, we can change our regular expression and add a class check in the match part of the rule. Only modules matching both the name and the class simultaneously will be injected.
+We also need to pass some keywords, similar to the injection of experts. Here is the YAML rule:
+
+```yaml
+- match:
+    name: "^model\\.layers\\.(?!.*self_attn).*$"  # regular expression
+    class: torch.nn.Linear  # only match modules matching name and class simultaneously
+  replace:
+    class: ktransformers.operators.linear.KTransformerLinear  # optimized Kernel on quantized data types
+    kwargs:
+      generate_device: "cuda"
+      prefill_device: "cuda"
+      generate_op: "QuantizedLinearMarlin"
+      prefill_op: "QuantizedLinearTorch"
+```
+

Pre-compute Buffers

+ +The original model is initialized on the meta device. The rotary embedding module pre-computes some buffers when initializing, which has no effect and doesn't compute anything when using the meta device. Therefore, we need to compute the buffers when loading the model. For convenience, we inject the rotary embedding module with our custom module, which performs pre-computations when loading. Here is the YAML rule: + +```yaml +- match: + class: ktransformers.models.modeling_deepseek.DeepseekV2YarnRotaryEmbedding + replace: + class: ktransformers.operators.RoPE.YarnRotaryEmbedding +``` + +## Wrap Your Custom Module + +We have implemented some modules, but you may need to inject your custom module using KTransformers. +The only thing you need to do is wrap your custom module and write YAML files. We provide a base operator specifying interfaces an injection module should have. You only need to inherit from that module and change the `__init__`, `forward`, or `load` function as needed. + +- The `__init__` function of the base operator maintains the necessary information for injection and execution of the KTransformers framework. To override this function, subclass modules need to call the base operator's `__init__` function in their own initializer. +- The `forward` function is a function in torch that will be called during inference, where the module author has the freedom to achieve higher performance. +- The `load` function is used to load all parameters of this module. The default implementation is to call the `load` function of all submodules. You can modify this function to customize its loading method and explicitly control the loading of its submodules. + diff --git a/doc/en/operators/Combined_MoE_time_per_layer.png b/doc/en/operators/Combined_MoE_time_per_layer.png new file mode 100644 index 0000000..8979562 Binary files /dev/null and b/doc/en/operators/Combined_MoE_time_per_layer.png differ diff --git a/doc/en/operators/Linear_projection_time.png b/doc/en/operators/Linear_projection_time.png new file mode 100644 index 0000000..6ccb89a Binary files /dev/null and b/doc/en/operators/Linear_projection_time.png differ diff --git a/doc/en/operators/llamafile.md b/doc/en/operators/llamafile.md new file mode 100644 index 0000000..013c380 --- /dev/null +++ b/doc/en/operators/llamafile.md @@ -0,0 +1,35 @@ +# Llamafile Operators Documentation + +## Llamafile Sgemm + +The Llamafile Sgemm module is an efficient implementation of general matrix multiplication (GEMM) extracted from the great [Llamafile project](https://github.com/Mozilla-Ocho/llamafile/blob/main/llamafile/sgemm.cpp). +This module optimizes performance by utilizing various processor-specific instruction sets. For instance, it checks for different x86 instruction sets such as AVX, FMA, and AVX512, leveraging these advanced instructions to accelerate computation. +Additionally, the Llamafile Sgemm module supports multiple quantization types, including q8_0, q6_k, and q5_k, among others. This adaptability to different hardware capabilities ensures the most advanced instructions are used in any given computing environment, achieving high computational efficiency. For more information, you can view the [Llamafile Sgemm module](https://github.com/Mozilla-Ocho/llamafile/blob/main/llamafile/sgemm.cpp) on GitHub. + + +## CPUInfer +To power Llamafile and many future CPU kernels without the original GGML framework, we developed a simple CPUInfer multi-threaded execution framework. 
It currently leverages the Llamafile Sgemm module to implement key operators such as linear layers, MLP, and MoE, and will be extended to support many other operators. These operators are fundamental components for building large models. CPUInfer features a backend work-stealing thread pool and asynchronous task queue execution logic to efficiently offload parts of model parameters to the CPU, thereby maintaining high inference performance. It supports adjustments based on hardware capabilities or user configurations, providing enhanced inference performance and making it an ideal tool for running deep learning models on CPUs.
+
+## Expert-Parallel MoE
+
+The MoE module's performance can be enhanced by using custom kernels that utilize **expert parallelism**. Since the routed experts are independently computable, we can utilize this inherent parallelism to speed up MoE computations. Specifically, we can allocate each expert MLP to a separate thread group, allowing for the simultaneous computation of all routed experts. This approach of expert parallelism significantly boosts MoE performance by minimizing the frequency of global synchronizations and reducing kernel launch overhead compared to sequential expert computation. A simplified sketch of this dispatch pattern is given at the end of this document.
+
+## Microbenchmark
+
+Our evaluations were conducted on an Intel(R) Xeon(R) Gold 6454S processor, utilizing real parameters from the DeepSeek-Coder-V2-Instruct model.
+
+### Linear Projection
+
+The performance of the linear layer was assessed using an Attention Output Projection with dimensions of [5120, 16384]. Here, the input was a vector of 16384 dimensions, and the output was a vector of 5120 dimensions.
+
+![Linear_projection_time](Linear_projection_time.png)
+
+As we can see, in half-precision floating-point formats (fp16 and bf16), CPUInfer's performance exceeded that of Torch by 1.7 and 1.5 times, respectively. For 8-bit quantization, CPUInfer (supporting q8_0) and Torch (supporting qint8) demonstrated nearly equivalent performance. However, CPUInfer employs a more refined scaling approach, using different factors for each group (in q8_0 quantization, every 32 numbers form one group), whereas Torch uses a basic per-tensor quantization, potentially leading to significant precision loss. Furthermore, CPUInfer's capability to use lower-bit quantization enhances inference speed in specific scenarios.
+
+### MoE
+
+In the MoE module, each token selected 6 experts out of 160 for computation, with input and output dimensions of 5120, and an intermediate dimension of 1536.
+
+![Combined_MoE_time_per_layer](Combined_MoE_time_per_layer.png)
+
+For half-precision floating-point and 8-bit quantization formats, CPUInfer's generation performance was 2.5 and 3.2 times better than Torch, respectively. Moreover, using the 8-bit quantization format, CPUInfer achieved faster prefill speeds compared to Torch, with shorter prompts highlighting a more pronounced performance difference.
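+
+To make the expert-parallel dispatch pattern described above concrete, here is a deliberately simplified Python sketch. This is only an illustration of the idea, not CPUInfer's actual (natively implemented) code:
+
+```python
+# Deliberately simplified sketch of expert-parallel MoE dispatch (illustration
+# only; CPUInfer's real implementation is a native thread-pool framework).
+from concurrent.futures import ThreadPoolExecutor
+import numpy as np
+
+def expert_mlp(w_up, w_down, x):
+    # Simplified expert: up-projection, ReLU, down-projection.
+    return w_down @ np.maximum(w_up @ x, 0.0)
+
+def moe_forward(experts, routed_ids, routing_weights, x, pool):
+    # Each routed expert runs in its own worker; outputs are then combined
+    # using the router's weights, mirroring the description above.
+    futures = [pool.submit(expert_mlp, *experts[i], x) for i in routed_ids]
+    return sum(w * f.result() for w, f in zip(routing_weights, futures))
+
+# Toy dimensions echoing the benchmark setup (5120 hidden, 1536 intermediate).
+experts = [(np.random.randn(1536, 5120), np.random.randn(5120, 1536))
+           for _ in range(8)]
+x = np.random.randn(5120)
+with ThreadPoolExecutor(max_workers=8) as pool:
+    y = moe_forward(experts, routed_ids=[1, 4], routing_weights=[0.6, 0.4],
+                    x=x, pool=pool)
+```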
diff --git a/doc/zh/api/server/api.md b/doc/zh/api/server/api.md
new file mode 100644
index 0000000..65e88b8
--- /dev/null
+++ b/doc/zh/api/server/api.md
@@ -0,0 +1,115 @@
+# API
+
+
+- [OpenAI ChatCompletion](#openai-chatcompletion)
+- [Ollama ChatCompletion](#ollama-chatcompletion)
+- [OpenAI Assistant](#openai-assistant)
+
+
+## OpenAI ChatCompletion
+```bash
+POST /v1/chat/completions
+```
+Generate a reply with the selected model.
+
+### Parameters
+
+
+- `messages`: an array of `message` objects containing all historical messages. A `message` represents a message from the user (user) or the model (assistant) and contains:
+
+  - `role`: `user` or `assistant`, identifying who created this message.
+  - `content`: the message content from the user or the model.
+
+- `model`: the name of the selected model
+- `stream`: true or false. Whether to return the reply as a stream. If true, inference results are returned as an HTTP event stream.
+
+### Response
+
+- Streaming: an event stream in which every event carries a `chat.completion.chunk`. `chunk.choices[0].delta.content` is the incremental output of the model.
+- Non-streaming: not supported yet.
+
+### Example
+
+```bash
+curl -X 'POST' \
+  'http://localhost:9112/v1/chat/completions' \
+  -H 'accept: application/json' \
+  -H 'Content-Type: application/json' \
+  -d '{
+  "messages": [
+    {
+      "content": "tell a joke",
+      "role": "user"
+    }
+  ],
+  "model": "Meta-Llama-3-8B-Instruct",
+  "stream": true
+}'
+```
+
+```bash
+data:{"id":"c30445e8-1061-4149-a101-39b8222e79e1","object":"chat.completion.chunk","created":1720511671,"model":"not implmented","system_fingerprint":"not implmented","usage":null,"choices":[{"index":0,"delta":{"content":"Why ","role":"assistant","name":null},"logprobs":null,"finish_reason":null}]}
+
+data:{"id":"c30445e8-1061-4149-a101-39b8222e79e1","object":"chat.completion.chunk","created":1720511671,"model":"not implmented","system_fingerprint":"not implmented","usage":null,"choices":[{"index":0,"delta":{"content":"","role":"assistant","name":null},"logprobs":null,"finish_reason":null}]}
+
+data:{"id":"c30445e8-1061-4149-a101-39b8222e79e1","object":"chat.completion.chunk","created":1720511671,"model":"not implmented","system_fingerprint":"not implmented","usage":null,"choices":[{"index":0,"delta":{"content":"couldn't ","role":"assistant","name":null},"logprobs":null,"finish_reason":null}]}
+
+...
+
+data:{"id":"c30445e8-1061-4149-a101-39b8222e79e1","object":"chat.completion.chunk","created":1720511671,"model":"not implmented","system_fingerprint":"not implmented","usage":null,"choices":[{"index":0,"delta":{"content":"two-tired!","role":"assistant","name":null},"logprobs":null,"finish_reason":null}]}
+
+event: done
+data: [DONE]
+```
+
+
+
+## Ollama ChatCompletion
+
+```bash
+POST /api/generate
+```
+
+Generate a reply with the selected model.
+
+### Parameters
+
+
+- `prompt`: a string containing the input prompt.
+- `model`: the name of the selected model
+- `stream`: true or false. Whether to return the reply as a stream. If true, inference results are returned as an HTTP event stream.
+
+### Response
+
+- Streaming: a streamed JSON response with one JSON object per line.
+  - `response`: the incremental completion produced by the model.
+  - `done`: whether inference has finished.
+
+- Non-streaming: not supported yet.
+
+### Example
+
+```bash
+curl -X 'POST' \
+  'http://localhost:9112/api/generate' \
+  -H 'accept: application/json' \
+  -H 'Content-Type: application/json' \
+  -d '{
+  "model": "Meta-Llama-3-8B-Instruct",
+  "prompt": "tell me a joke",
+  "stream": true
+}'
+```
+
+```bash
+{"model":"Meta-Llama-3-8B-Instruct","created_at":"2024-07-09 08:13:11.686513","response":"I'll ","done":false}
+{"model":"Meta-Llama-3-8B-Instruct","created_at":"2024-07-09 08:13:11.729214","response":"give ","done":false}
+
+...
+
+{"model":"Meta-Llama-3-8B-Instruct","created_at":"2024-07-09 08:13:33.955475","response":"for","done":false}
+{"model":"Meta-Llama-3-8B-Instruct","created_at":"2024-07-09 08:13:33.956795","response":"","done":true}
+```
+
+
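+Because the endpoint is OpenAI-compatible, existing OpenAI client libraries can be pointed directly at the Server. As a sketch (this assumes the third-party `openai` Python package, v1 or later, which is not part of this repository):
+
+```python
+from openai import OpenAI
+
+# Placeholder key for a local server; whether the Server validates it is up
+# to the user configuration.
+client = OpenAI(base_url="http://localhost:9112/v1", api_key="not-needed")
+stream = client.chat.completions.create(
+    model="Meta-Llama-3-8B-Instruct",
+    messages=[{"role": "user", "content": "tell a joke"}],
+    stream=True,
+)
+for chunk in stream:
+    # Each chunk mirrors the chat.completion.chunk objects shown above.
+    print(chunk.choices[0].delta.content or "", end="")
+```
+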
ๆฅๅญ˜ๅ‚จ็”จๆˆทๅ’Œๆจกๅž‹็š„ๅฏน่ฏๆถˆๆฏ๏ผˆMessage๏ผ‰ใ€‚่ฐƒ็”จๆจกๅž‹ๆ—ถ๏ผŒๅบ”็”จ้œ€่ฆๅˆ›ๅปบไธ€ไธช Run ๆฅ่Žทๅพ— Assistant ็š„ๅ›žๅคใ€‚็›ธๅฏนไบŽ ChatCompletion๏ผŒๅฎž็Žฐไบ† Assistant ็š„ Server ไปฃๆ›ฟๅบ”็”จๅฎž็Žฐไบ†ๅฏน่ฏ่ƒŒๆ™ฏๅค็”จๅ’Œๅคš่ฝฎๅฏน่ฏ๏ผŒไฝฟๅพ—ๅคๆ‚ๅœบๆ™ฏไธ‹็š„ๆจกๅž‹็š„่ฐƒ็”จๆ›ดๅŠ ๆ–นไพฟใ€‚ [OpenAI Assistant API](https://platform.openai.com/docs/api-reference/assistants/createAssistant) ๆๅ‡บไบ†่ฟ™ๆ ท็š„ Assistant ๆŽฅๅฃ๏ผŒ่€Œ Server ไนŸๆไพ›ๅ’Œๅฎƒไธ€่‡ด็š„ API ใ€‚ + +่ฟ™ไบ› API ๅฎšไน‰ๅœจ`server/api`ไธญ๏ผŒๅฎƒไปฌ็š„ๅ…ทไฝ“ไฝฟ็”จ่ฏท่ง[่ฟ™้‡Œ](api.md)ใ€‚ + + +## ๅฏนๆŽฅๆจกๅž‹ๆŽจ็†ๆก†ๆžถ + +Server ้€š่ฟ‡ ktransformers ่ฐƒ็”จๆจกๅž‹ๅนถ่ฟ›่กŒๆŽจ็†ใ€‚Server ไนŸๆ”ฏๆŒๅ…ถไป–็š„ๆŽจ็†ๆก†ๆžถ๏ผŒไพ‹ๅฆ‚ๅทฒ็ปๆ”ฏๆŒ็š„ [transformers](https://huggingface.co/docs/transformers/index) ๏ผŒๅนถ่ฎกๅˆ’ๆ”ฏๆŒ [exllamav2](https://github.com/turboderp/exllamav2)ใ€‚่ฟ™ไบ›ๅŠŸ่ƒฝๅœจ`server/backend` ไธญๅฎž็Žฐใ€‚ + +Server ๅฐ†ๆจกๅž‹ๆŽจ็†ๆก†ๆžถ็š„ๆŽจ็†ๅŠŸ่ƒฝๆŠฝ่ฑกๆˆไธ€ไธชๅŸบ็ฑป`BackendInterfaceBase`ใ€‚่ฟ™ไธชๅŸบ็ฑปๅŒ…ๅซไธ€ไธชๅ‡ฝๆ•ฐ๏ผšinferenceใ€‚ๅฎƒ็š„่พ“ๅ…ฅๆ˜ฏๆ˜ฏๅކๅฒ็š„ๅฏน่ฏไฟกๆฏ messages๏ผŒ่พ“ๅ‡บๆ˜ฏๆจกๅž‹่ฟ”ๅ›ž็š„ๆ–‡ๅญ—็ป“ๆžœใ€‚inference ๅ‡ฝๆ•ฐ้‡‡็”จ async generator ็š„่ฎพ่ฎก๏ผŒ่ฟ™ไฝฟๅพ— Server ๅฏไปฅๆตๅผๅœฐ่ฟ”ๅ›žๆจกๅž‹็š„ๅ›žๅคใ€‚ + +```python +class BackendInterfaceBase: + async def inference(self, messages, **kwargs)->AsyncIterator[str]: + ... +``` + +่ฟ™ไธช inference ๅ‡ฝๆ•ฐ๏ผŒๅ› ไธบๅฎƒ็š„่พ“ๅ…ฅๅ’Œ่พ“ๅ‡บๅˆ†ๅˆซๆ˜ฏๅކๅฒๅฏน่ฏๅ’Œๆจกๅž‹ๅ›žๅค๏ผŒๆ‰€ไปฅๅฎƒ่‡ช็„ถๅœฐๅฎž็Žฐไบ† ChatCompletion ็š„ๅŠŸ่ƒฝใ€‚ๅ› ๆญค ChatCompletion API ๅฏไปฅ็›ดๆŽฅ่ฐƒ็”จinference ๅ‡ฝๆ•ฐๅฎŒๆˆๆจกๅž‹ๆŽจ็†ใ€‚ + +่€Œ Assistant ๅˆ™ๆฏ” ChatCompletion ๅคๆ‚่ฎธๅคš๏ผŒ้œ€่ฆ Server ๅญ˜ๅ‚จ Assistant ็š„็›ธๅ…ณ็Šถๆ€๏ผŒๅนถไปฅๅˆ้€‚็š„ๆ–นๅผ่ฐƒ็”จ inference ๅ‡ฝๆ•ฐใ€‚Server ๅœจๆ•ฐๆฎๅบ“ไธญ็ปดๆŠคไบ†ไธ€ๅฅ— Assistant ้€ป่พ‘๏ผŒๅญ˜ๅ‚จๅบ”็”จๅˆ›ๅปบ็š„ Assistant๏ผŒThread ๅ’Œ Messageใ€‚ๅœจๅ†…ๅญ˜ไธญ๏ผŒServer ไธบๆฏไธช Thread ็ปดๆŠคไธ€ไธช `ThreadContext`๏ผŒ้›†ๅˆๆฏไธชThread ็›ธๅ…ณ็š„ Assistant ็ญ‰ไฟกๆฏใ€‚ๅฝ“็”จๆˆทๅ‘ๅ‡บๆ–ฐ็š„ Message ๆ—ถ๏ผŒServer ่ฐƒ็”จ ThreadContext ็š„get_local_messagesๅ‡ฝๆ•ฐ๏ผŒ่Žทๅพ— messages๏ผŒๅนถ่ฐƒ็”จ inference ๅ‡ฝๆ•ฐ่Žทๅพ—ๆŽจ็†็ป“ๆžœใ€‚ + +```python +class MyThreadContext(ThreadContext): + def get_local_messages(self): + ... +``` + +็”ฑไบŽไธๅŒ็š„ๆจกๅž‹ๆŽจ็†ๆก†ๆžถๆœ‰็€ไธๅŒ็š„ๅކๅฒๅฏน่ฏ่พ“ๅ…ฅๆ ผๅผ๏ผŒๆ‰€ไปฅ `ThreadContext` ๅ’Œ `BackendInterface` ้œ€่ฆๆˆๅฏนๅœฐไฝฟ็”จใ€‚Server ้™คไบ†่‡ชๅทฑ็š„ ktransformers ไน‹ๅค–๏ผŒ่ฟ˜ๆ”ฏๆŒ transformersใ€‚ๅฆ‚ๆžœ่ฆๅฏนๆŽฅๅ…ถไป–็š„ๆจกๅž‹ๆŽจ็†ๆก†ๆžถ๏ผŒๅฏไปฅๅ‚่€ƒๅœจ [transformers.py](https://github.com/kvcache-ai/ktransformers-dev/blob/main/ktransformers/server/backend/interfaces/transformers.py) ไธญ`TransformersInterface`ๅ’Œ`TransformersThreadContext`็š„ๅฎž็Žฐใ€‚ + + + diff --git a/doc/zh/api/server/tabby.md b/doc/zh/api/server/tabby.md new file mode 100644 index 0000000..50dbd9a --- /dev/null +++ b/doc/zh/api/server/tabby.md @@ -0,0 +1,34 @@ +# ๅฆ‚ไฝ•ไฝฟ็”จ Tabby ๅ’Œ ktransformers ๅœจๆœฌๅœฐๅˆฉ็”จ 236B ็š„ๅคงๆจกๅž‹ๅšไปฃ็ ่กฅๅ…จ๏ผŸ + +[Tabby](https://tabby.tabbyml.com/docs/welcome/) ๆ˜ฏไธ€ไธชๅผ€ๆบ็š„ไปฃ็ ๅŠฉๆ‰‹๏ผŒ็”จๆˆทๅฏไปฅๆ‰‹ๅŠจ้…็ฝฎๅŽ็ซฏไฝฟ็”จ็š„ๆก†ๆžถๅŠๆจกๅž‹๏ผŒๅนถๅœจๅคšไธช IDE/็ผ–่พ‘ๅ™จ ไธŠไฝฟ็”จ๏ผŒไพ‹ๅฆ‚ VSCode ๅ’Œ InteliJใ€‚ๅ› ไธบ Tabby ๅœจๆก†ๆžถไพงๅฏไปฅๅฏนๆŽฅๅˆฐ Ollama๏ผŒๅนถไธ” ktransformers server ๆไพ›ๅ’Œ Ollama ไธ€่‡ด็š„ API ๆŽฅๅฃ๏ผŒๆ‰€ไปฅๆˆ‘ไปฌๅฏไปฅๅฐ† Tabby ๅฏนๆŽฅๅˆฐ ktransformers serverใ€‚ๅนถๅœจไปฃ็ ่กฅๅ…จ็š„ๅœบๆ™ฏไธญไฝ“้ชŒๅˆฐ ktransformers ๅฟซ้€Ÿ็š„ๅผ‚ๆž„ๆŽจ็†ใ€‚ + +1. ๅฏๅŠจ ktransformersใ€‚ +```bash +./ktransformers --port 9112 +``` +2. 
diff --git a/doc/zh/api/server/tabby.md b/doc/zh/api/server/tabby.md
new file mode 100644
index 0000000..50dbd9a
--- /dev/null
+++ b/doc/zh/api/server/tabby.md
@@ -0,0 +1,34 @@
+# How to use Tabby and ktransformers for local code completion with a 236B model?
+
+[Tabby](https://tabby.tabbyml.com/docs/welcome/) is an open-source code assistant. Users can configure which framework and model the backend uses, and then use it in several IDEs/editors, such as VSCode and IntelliJ. Because Tabby can talk to Ollama on the framework side, and the ktransformers server provides an API consistent with Ollama's, we can connect Tabby to the ktransformers server and experience ktransformers' fast heterogeneous inference in code completion.
+
+1. Start ktransformers.
+```bash
+./ktransformers --port 9112
+```
+2. Install Tabby: follow the official tutorial to [install Tabby](https://tabby.tabbyml.com/docs/quick-start/installation/linux/) on a Linux server or a Windows PC with an NVIDIA GPU.
+3. Configure Tabby: create `~/.tabby/config.toml` and add the following configuration.
+```toml
+[model.completion.http]
+kind = "ollama/completion"
+api_endpoint = "http://127.0.0.1:9112/"
+model_name = "DeepSeek-Coder-V2-Instruct"
+prompt_template = "<๏ฝœfimโ–begin๏ฝœ>{prefix}<๏ฝœfimโ–hole๏ฝœ>{suffix}<๏ฝœfimโ–end๏ฝœ>"  # Prompt Template
+```
+
+In this configuration, `kind` states that ktransformers serves Tabby through Ollama's standard API; `api_endpoint` must match the address ktransformers binds at startup; `model_name` is the model ktransformers runs, here `DeepSeek-Coder-V2-Instruct`; `prompt_template` is the model's prompt template, and the template matching the model is required for its Fill-In-the-Middle capability to work correctly.
+This shows the Tabby configuration for providing Completion through the Ollama API; for configuring Tabby's other optional features, see [here](https://tabby.tabbyml.com/docs/administration/model/).
+
+
+4. Start the Tabby service: `./tabby serve`.
+image-20240709112329577
+
+After startup, you should see requests to the `/api/tags` endpoint in the ktransformers command-line window (in the newer Tabby v0.13.0, this becomes the `/api/show/` endpoint).
+image-20240709111648215
+
+5. Register a Tabby account and obtain a Token: after the Tabby service starts, open the corresponding link in a browser (0.0.0.0:8080 in the screenshot above), and follow the [tutorial](https://tabby.tabbyml.com/docs/quick-start/register-account/) to create a user and obtain a Token.
+
+6. Start VSCode, install the Tabby extension, and connect to the Tabby Server with the Token obtained in the previous step, as described [here](https://tabby.tabbyml.com/docs/extensions/installation/vscode/).
+
+7. Open any code file and experience ktransformers' fast heterogeneous inference.
+
diff --git a/doc/zh/api/server/visit-api-tags.png b/doc/zh/api/server/visit-api-tags.png
new file mode 100644
index 0000000..d763dd2
Binary files /dev/null and b/doc/zh/api/server/visit-api-tags.png differ
diff --git a/doc/zh/api/server/website.md b/doc/zh/api/server/website.md
new file mode 100644
index 0000000..bd380cd
--- /dev/null
+++ b/doc/zh/api/server/website.md
@@ -0,0 +1,32 @@
+# Start with website
+
+This document provides the necessary steps to set up and run the web service for this project.
+
+## 1. Starting the Web Service
+
+### 1.1. Compiling the Web Code
+
+Before you can compile the web code, make sure you have installed [Node.js](https://nodejs.org) version 18.3 or higher.
+
+Once Node.js (which ships with npm) is installed, navigate to the `ktransformers/website` directory:
+
+```bash
+cd ktransformers/website
+```
+
+Next, install the Vue CLI with the following command:
+
+```bash
+npm install @vue/cli
+```
+
+Now you can build the project:
+
+```bash
+npm run build
+```
+Finally, you can build ktransformers together with the website:
+```
+cd ../../
+pip install .
+``` diff --git a/install.sh b/install.sh new file mode 100644 index 0000000..d8cceef --- /dev/null +++ b/install.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -e + +# clear build dirs +rm -rf ktransformers/ktransformers_ext/build +rm -rf ktransformers/ktransformers_ext/cuda/build +rm -rf ktransformers/ktransformers_ext/cuda/dist +rm -rf ktransformers/ktransformers_ext/cuda/*.egg-info + +echo "Installing python dependencies from requirements.txt" +pip install -r requirements-local_chat.txt + +echo "Installing ktransformers cpuinfer" +mkdir -p ktransformers/ktransformers_ext/build +cd ktransformers/ktransformers_ext/build +cmake .. +cmake --build . --config Release + +echo "Installing ktransformers gpu kernel, this may take for a while, please wait" +sleep 3 + +cd ../cuda +python setup.py install +cd ../../.. +echo "Installation completed successfully" \ No newline at end of file diff --git a/ktransformers/__init__.py b/ktransformers/__init__.py new file mode 100644 index 0000000..a68927d --- /dev/null +++ b/ktransformers/__init__.py @@ -0,0 +1 @@ +__version__ = "0.1.0" \ No newline at end of file diff --git a/ktransformers/configs/config.yaml b/ktransformers/configs/config.yaml new file mode 100644 index 0000000..3a4816c --- /dev/null +++ b/ktransformers/configs/config.yaml @@ -0,0 +1,37 @@ +log: + dir: "logs" + file: "lexllama.log" + #log level: debug, info, warn, error, crit + level: "debug" + backup_count: -1 + +server: + ip: 0.0.0.0 + port: 12456 + +db: + type: "sqllite" + database: "server.db" + host: "./" + pool_size: 10 + +user: + secret_key: "981f1dd2a44e27d68759d0252a486568ed43480b4e616a26e3af3709c3a7ce73" + algorithm: "HS256" + +model: + # type: transformers + type: ktransformers + + name: DeepSeek-Coder-V2-Instruct + path: /mnt/data/model/DeepSeek-Coder-V2-Instruct/ + gguf_path: /mnt/data/model/DeepSeek-Coder-V2-GGUF-WJH/ + + device: cuda:0 + +web: + mount: False + open_cross_domain: True + +ext: + cpu_infer: 10 \ No newline at end of file diff --git a/ktransformers/configs/log_config.ini b/ktransformers/configs/log_config.ini new file mode 100644 index 0000000..8b5b2e0 --- /dev/null +++ b/ktransformers/configs/log_config.ini @@ -0,0 +1,46 @@ +[loggers] +keys=root,uvicorn,uvicornError,uvicornAccess + +[handlers] +keys=consoleHandler,fileHandler + +[formatters] +keys=detailedFormatter + +[logger_root] +level=INFO +handlers=consoleHandler + +[logger_uvicorn] +level=INFO +handlers=consoleHandler,fileHandler +qualname=uvicorn +propagate=0 + +[logger_uvicornError] +level=ERROR +handlers=consoleHandler,fileHandler +qualname=uvicorn.error +propagate=0 + +[logger_uvicornAccess] +level=INFO +handlers=consoleHandler,fileHandler +qualname=uvicorn.access +propagate=0 + +[handler_consoleHandler] +class=StreamHandler +level=INFO +formatter=detailedFormatter +args=(sys.stdout,) + +[handler_fileHandler] +class=logging.FileHandler +level=INFO +formatter=detailedFormatter +args=('uvicorn_logs.log', 'a') + +[formatter_detailedFormatter] +format=%(asctime)s - %(name)s - %(levelname)s - [%(filename)s:%(lineno)d] - %(message)s +datefmt=%Y-%m-%d %H:%M:%S diff --git a/ktransformers/ktransformers_ext/CMakeLists.txt b/ktransformers/ktransformers_ext/CMakeLists.txt new file mode 100644 index 0000000..3b6f54e --- /dev/null +++ b/ktransformers/ktransformers_ext/CMakeLists.txt @@ -0,0 +1,169 @@ +cmake_minimum_required(VERSION 3.16) +project(cpuinfer_ext VERSION 0.1.0) + +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3") +set(CMAKE_BUILD_TYPE "Release") +include(CheckCXXCompilerFlag) 
+
+set(CMAKE_POSITION_INDEPENDENT_CODE ON)
+
+
+option(LLAMA_NATIVE "llama: enable -march=native flag" ON)
+
+# Architecture specific
+# TODO: probably these flags need to be tweaked on some architectures
+#       feel free to update the Makefile for your architecture and send a pull request or issue
+message(STATUS "CMAKE_SYSTEM_PROCESSOR: ${CMAKE_SYSTEM_PROCESSOR}")
+if (MSVC)
+    string(TOLOWER "${CMAKE_GENERATOR_PLATFORM}" CMAKE_GENERATOR_PLATFORM_LWR)
+    message(STATUS "CMAKE_GENERATOR_PLATFORM: ${CMAKE_GENERATOR_PLATFORM}")
+else ()
+    set(CMAKE_GENERATOR_PLATFORM_LWR "")
+endif ()
+
+if (NOT MSVC)
+    if (LLAMA_STATIC)
+        add_link_options(-static)
+        if (MINGW)
+            add_link_options(-static-libgcc -static-libstdc++)
+        endif()
+    endif()
+    if (LLAMA_GPROF)
+        add_compile_options(-pg)
+    endif()
+endif()
+
+set(ARCH_FLAGS "")
+
+if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64" OR CMAKE_GENERATOR_PLATFORM_LWR STREQUAL "arm64" OR
+    (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND
+        CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm.*|ARM64)$"))
+    message(STATUS "ARM detected")
+    if (MSVC)
+        add_compile_definitions(__aarch64__) # MSVC defines _M_ARM64 instead
+        add_compile_definitions(__ARM_NEON)
+        add_compile_definitions(__ARM_FEATURE_FMA)
+
+        set(CMAKE_REQUIRED_FLAGS_PREV ${CMAKE_REQUIRED_FLAGS})
+        string(JOIN " " CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS} "/arch:armv8.2")
+        check_cxx_source_compiles("#include <arm_neon.h>\nint main() { int8x16_t _a, _b; int32x4_t _s = vdotq_s32(_s, _a, _b); return 0; }" GGML_COMPILER_SUPPORT_DOTPROD)
+        if (GGML_COMPILER_SUPPORT_DOTPROD)
+            add_compile_definitions(__ARM_FEATURE_DOTPROD)
+        endif ()
+        check_cxx_source_compiles("#include <arm_neon.h>\nint main() { float16_t _a; float16x8_t _s = vdupq_n_f16(_a); return 0; }" GGML_COMPILER_SUPPORT_FP16_VECTOR_ARITHMETIC)
+        if (GGML_COMPILER_SUPPORT_FP16_VECTOR_ARITHMETIC)
+            add_compile_definitions(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+        endif ()
+        set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_PREV})
+    else()
+        check_cxx_compiler_flag(-mfp16-format=ieee COMPILER_SUPPORTS_FP16_FORMAT_I3E)
+        if (NOT "${COMPILER_SUPPORTS_FP16_FORMAT_I3E}" STREQUAL "")
+            list(APPEND ARCH_FLAGS -mfp16-format=ieee)
+        endif()
+        if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv6")
+            # Raspberry Pi 1, Zero
+            list(APPEND ARCH_FLAGS -mfpu=neon-fp-armv8 -mno-unaligned-access)
+        endif()
+        if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv7")
+            if ("${CMAKE_SYSTEM_NAME}" STREQUAL "Android")
+                # Android armeabi-v7a
+                list(APPEND ARCH_FLAGS -mfpu=neon-vfpv4 -mno-unaligned-access -funsafe-math-optimizations)
+            else()
+                # Raspberry Pi 2
+                list(APPEND ARCH_FLAGS -mfpu=neon-fp-armv8 -mno-unaligned-access -funsafe-math-optimizations)
+            endif()
+        endif()
+        if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv8")
+            # Android arm64-v8a
+            # Raspberry Pi 3, 4, Zero 2 (32-bit)
+            list(APPEND ARCH_FLAGS -mno-unaligned-access)
+        endif()
+    endif()
+elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LWR MATCHES "^(x86_64|i686|amd64|x64|win32)$" OR
+        (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND
+            CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|i686|AMD64)$"))
+    message(STATUS "x86 detected")
+    if (MSVC)
+        # instruction set detection for MSVC only
+        if (LLAMA_NATIVE)
+            include(cmake/FindSIMD.cmake)
+        endif ()
+        if (LLAMA_AVX512)
+            list(APPEND ARCH_FLAGS /arch:AVX512)
+            # MSVC has no compile-time flags enabling specific
+            # AVX512 extensions, neither it defines the
+            # macros corresponding to the extensions.
+            # Do it manually.
+            if (LLAMA_AVX512_VBMI)
+                add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VBMI__>)
+                add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VBMI__>)
+            endif()
+            if (LLAMA_AVX512_VNNI)
+                add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VNNI__>)
+                add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VNNI__>)
+            endif()
+        elseif (LLAMA_AVX2)
+            list(APPEND ARCH_FLAGS /arch:AVX2)
+        elseif (LLAMA_AVX)
+            list(APPEND ARCH_FLAGS /arch:AVX)
+        endif()
+    else()
+        if (LLAMA_NATIVE)
+            list(APPEND ARCH_FLAGS -march=native)
+        endif()
+        if (LLAMA_F16C)
+            list(APPEND ARCH_FLAGS -mf16c)
+        endif()
+        if (LLAMA_FMA)
+            list(APPEND ARCH_FLAGS -mfma)
+        endif()
+        if (LLAMA_AVX)
+            list(APPEND ARCH_FLAGS -mavx)
+        endif()
+        if (LLAMA_AVX2)
+            list(APPEND ARCH_FLAGS -mavx2)
+        endif()
+        if (LLAMA_AVX512)
+            list(APPEND ARCH_FLAGS -mavx512f)
+            list(APPEND ARCH_FLAGS -mavx512bw)
+        endif()
+        if (LLAMA_AVX512_VBMI)
+            list(APPEND ARCH_FLAGS -mavx512vbmi)
+        endif()
+        if (LLAMA_AVX512_VNNI)
+            list(APPEND ARCH_FLAGS -mavx512vnni)
+        endif()
+    endif()
+elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64")
+    message(STATUS "PowerPC detected")
+    if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64le")
+        list(APPEND ARCH_FLAGS -mcpu=powerpc64le)
+    else()
+        list(APPEND ARCH_FLAGS -mcpu=native -mtune=native)
+        #TODO: Add targets for Power8/Power9 (Altivec/VSX) and Power10(MMA) and query for big endian systems (ppc64/le/be)
+    endif()
+else()
+    message(STATUS "Unknown architecture")
+endif()
+
+find_package(CUDA REQUIRED)
+
+add_compile_options("$<$<COMPILE_LANGUAGE:CXX>:${ARCH_FLAGS}>")
+add_compile_options("$<$<COMPILE_LANGUAGE:C>:${ARCH_FLAGS}>")
+
+add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../../third_party/pybind11 ${CMAKE_CURRENT_BINARY_DIR}/third_party/pybind11)
+add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../../third_party/llama.cpp ${CMAKE_CURRENT_BINARY_DIR}/third_party/llama.cpp)
+
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../../third_party)
+include_directories("${CUDA_INCLUDE_DIRS}")
+
+aux_source_directory(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_DIR1)
+aux_source_directory(${CMAKE_CURRENT_SOURCE_DIR}/cpu_backend SOURCE_DIR2)
+aux_source_directory(${CMAKE_CURRENT_SOURCE_DIR}/operators/llamafile SOURCE_DIR3)
+aux_source_directory(${CMAKE_CURRENT_SOURCE_DIR}/../../third_party/llamafile SOURCE_DIR4)
+set(ALL_SOURCES ${SOURCE_DIR1} ${SOURCE_DIR2} ${SOURCE_DIR3} ${SOURCE_DIR4})
+message(STATUS "ALL_SOURCES: ${ALL_SOURCES}")
+
+pybind11_add_module(${PROJECT_NAME} MODULE ${ALL_SOURCES})
+target_link_libraries(${PROJECT_NAME} PRIVATE llama)
+target_link_libraries(${PROJECT_NAME} PRIVATE "/usr/local/cuda/lib64/libcudart.so")
\ No newline at end of file
diff --git a/ktransformers/ktransformers_ext/bench/bench_linear.py b/ktransformers/ktransformers_ext/bench/bench_linear.py
new file mode 100644
index 0000000..0a4de3a
--- /dev/null
+++ b/ktransformers/ktransformers_ext/bench/bench_linear.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python
+# coding=utf-8
+'''
+Description  :  
+Author       : chenht2022
+Date         : 2024-07-25 10:31:59
+Version      : 1.0.0
+LastEditors  : chenht2022 
+LastEditTime : 2024-07-25 10:32:51
+Copyright (c) 2024 by KVCache.AI, All Rights Reserved. 
+''' +import os, sys +import time +sys.path.append(os.path.dirname(__file__) + '/../build') +import cpuinfer_ext +import torch + +def bench_linear(quant_mode: str): + with torch.inference_mode(mode=True): + input_size = 16384 + output_size = 5120 + stride = 16 + layer_num = 10 + CPUInfer = cpuinfer_ext.CPUInfer(64) + warm_up_iter = 1000 + test_iter = 10000 + + hidden_type = 30 # ggml_type::GGML_TYPE_BF16 + if quant_mode == "fp32": + proj_type = 0 # ggml_type::GGML_TYPE_F32 + bytes_per_elem = 4.000000 + elif quant_mode == "fp16": + proj_type = 1 # ggml_type::GGML_TYPE_F16 + bytes_per_elem = 2.000000 + elif quant_mode == "bf16": + proj_type = 30 # ggml_type::GGML_TYPE_BF16 + bytes_per_elem = 2.000000 + elif quant_mode == "q8_0": + proj_type = 8 # ggml_type::GGML_TYPE_Q8_0 + bytes_per_elem = 1.062500 + elif quant_mode == "q6_k": + proj_type = 14 # ggml_type::GGML_TYPE_Q6_K + bytes_per_elem = 0.820312 + elif quant_mode == "q5_k_m": + proj_type = 13 # ggml_type::GGML_TYPE_Q5_K + bytes_per_elem = 0.687500 + elif quant_mode == "q4_k_m": + proj_type = 12 # ggml_type::GGML_TYPE_Q4_K + bytes_per_elem = 0.562500 + elif quant_mode == "q3_k_m": + proj_type = 11 # ggml_type::GGML_TYPE_Q3_K + bytes_per_elem = 0.429688 + elif quant_mode == "q2_k": + proj_type = 10 # ggml_type::GGML_TYPE_Q2_K + bytes_per_elem = 0.328125 + elif quant_mode == "iq3_xs": + proj_type = 21 # ggml_type::GGML_TYPE_IQ3_S + bytes_per_elem = 0.429688 + elif quant_mode == "iq2_xxs": + proj_type = 16 # ggml_type::GGML_TYPE_IQ2_XXS + bytes_per_elem = 0.257812 + else: + assert(False) + + linears = [] + projs = [] + for _ in range(layer_num): + proj = torch.randn((output_size, input_size), dtype=torch.float32, device = "cuda").to("cpu").contiguous() + config = cpuinfer_ext.linear.LinearConfig(input_size, output_size, stride, proj.data_ptr(), proj_type, hidden_type) + linear = cpuinfer_ext.linear.Linear(config) + projs.append(proj) + linears.append(linear) + + # warm up + for i in range(warm_up_iter): + linear = linears[i % layer_num] + input = torch.randn((1, input_size), dtype=torch.bfloat16).contiguous() + output = torch.empty((1, output_size), dtype=torch.bfloat16).contiguous() + CPUInfer.submit(linear.forward, input.data_ptr(), output.data_ptr()) + CPUInfer.sync() + + # test + total_time = 0 + for i in range(test_iter): + linear = linears[i % layer_num] + input = torch.randn((1, input_size), dtype=torch.bfloat16).contiguous() + output = torch.empty((1, output_size), dtype=torch.bfloat16).contiguous() + start = time.perf_counter() + CPUInfer.submit(linear.forward, input.data_ptr(), output.data_ptr()) + CPUInfer.sync() + end = time.perf_counter() + total_time += end - start + print('Quant mode: ', quant_mode) + print('Time(s): ', total_time) + print('Iteration: ', test_iter) + print('Time(us) per iteration: ', total_time / test_iter * 1000000) + print('Bandwidth: ', input_size * output_size * bytes_per_elem * test_iter / total_time / 1000 / 1000 / 1000, 'GB/s') + print('') + +bench_linear("fp32") +bench_linear("fp16") +bench_linear("bf16") +bench_linear("q8_0") +bench_linear("q6_k") +bench_linear("q5_k_m") +bench_linear("q4_k_m") +bench_linear("q3_k_m") +bench_linear("q2_k") +# Not supported on __x86_64__ +# bench_linear("iq3_xs") +# bench_linear("iq2_xxs") diff --git a/ktransformers/ktransformers_ext/bench/bench_linear_torch.py b/ktransformers/ktransformers_ext/bench/bench_linear_torch.py new file mode 100644 index 0000000..cb3e4ef --- /dev/null +++ b/ktransformers/ktransformers_ext/bench/bench_linear_torch.py @@ -0,0 +1,85 @@ 
+#!/usr/bin/env python +# coding=utf-8 +''' +Description : +Author : chenht2022 +Date : 2024-07-25 10:31:59 +Version : 1.0.0 +LastEditors : chenht2022 +LastEditTime : 2024-07-25 10:32:48 +Copyright (c) 2024 by KVCache.AI, All Rights Reserved. +''' +import os, sys +import time +import torch +import torch.nn.quantized as nnq + +def bench_linear(quant_mode: str): + with torch.inference_mode(mode=True): + input_size = 16384 + output_size = 5120 + layer_num = 10 + warm_up_iter = 1000 + test_iter = 10000 + + if quant_mode == "fp32": + proj_type = torch.float32 + bytes_per_elem = 4.000000 + elif quant_mode == "fp16": + proj_type = torch.float16 + bytes_per_elem = 2.000000 + elif quant_mode == "bf16": + proj_type = torch.bfloat16 + bytes_per_elem = 2.000000 + elif quant_mode == "qint8": + proj_type = torch.qint8 + bytes_per_elem = 1.000000 + else: + assert(False) + + projs = [] + for _ in range(layer_num): + proj = torch.randn((output_size, input_size), dtype = torch.float32, device = "cuda").to("cpu").contiguous() + if quant_mode == "qint8": + scale, zero_point = 0.1, 0 # Adjust scale and zero_point based on your dataset + proj_q = torch.quantize_per_tensor(proj, scale, zero_point, torch.qint8) + quantized_layer = nnq.Linear(input_size, output_size) + quantized_layer.set_weight_bias(proj_q, None) + projs.append(quantized_layer) + else: + projs.append(proj.to(proj_type)) + + # warm up + for i in range(warm_up_iter): + input = torch.randn((1, input_size), dtype=torch.float32).contiguous() + if quant_mode == "qint8": + input_q = torch.quantize_per_tensor(input, scale, zero_point, torch.quint8) + quantized_layer = projs[i % layer_num] + t_output = quantized_layer(input_q) + else: + t_output = torch.mm(input.to(proj_type), projs[i % layer_num].t()) + + # test + total_time = 0 + for i in range(test_iter): + input = torch.randn((1, input_size), dtype=torch.float32).contiguous() + start = time.perf_counter() + if quant_mode == "qint8": + input_q = torch.quantize_per_tensor(input, scale, zero_point, torch.quint8) + quantized_layer = projs[i % layer_num] + t_output = quantized_layer(input_q) + else: + t_output = torch.mm(input.to(proj_type), projs[i % layer_num].t()) + end = time.perf_counter() + total_time += end - start + print('Quant mode: ', quant_mode) + print('Time(s): ', total_time) + print('Iteration: ', test_iter) + print('Time(us) per iteration: ', total_time / test_iter * 1000000) + print('Bandwidth: ', input_size * output_size * bytes_per_elem * test_iter / total_time / 1000 / 1000 / 1000, 'GB/s') + print('') + +bench_linear("fp32") +bench_linear("fp16") +bench_linear("bf16") +bench_linear("qint8") diff --git a/ktransformers/ktransformers_ext/bench/bench_mlp.py b/ktransformers/ktransformers_ext/bench/bench_mlp.py new file mode 100644 index 0000000..5680a9b --- /dev/null +++ b/ktransformers/ktransformers_ext/bench/bench_mlp.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python +# coding=utf-8 +''' +Description : +Author : chenht2022 +Date : 2024-07-16 10:43:18 +Version : 1.0.0 +LastEditors : chenht2022 +LastEditTime : 2024-07-25 10:32:55 +Copyright (c) 2024 by KVCache.AI, All Rights Reserved. 
+''' +import os, sys +import time +sys.path.append(os.path.dirname(__file__) + '/../build') +import cpuinfer_ext +import torch + +def bench_mlp(quant_mode: str): + with torch.inference_mode(mode=True): + hidden_size = 5120 + intermediate_size = 3072 + stride = 16 + layer_num = 10 + CPUInfer = cpuinfer_ext.CPUInfer(64) + warm_up_iter = 1000 + test_iter = 10000 + + hidden_type = 30 # ggml_type::GGML_TYPE_BF16 + if quant_mode == "fp32": + gate_type = 0 # ggml_type::GGML_TYPE_F32 + up_type = 0 # ggml_type::GGML_TYPE_F32 + down_type = 0 # ggml_type::GGML_TYPE_F32 + bytes_per_elem = 4.000000 + elif quant_mode == "fp16": + gate_type = 1 # ggml_type::GGML_TYPE_F16 + up_type = 1 # ggml_type::GGML_TYPE_F16 + down_type = 1 # ggml_type::GGML_TYPE_F16 + bytes_per_elem = 2.000000 + elif quant_mode == "bf16": + gate_type = 30 # ggml_type::GGML_TYPE_BF16 + up_type = 30 # ggml_type::GGML_TYPE_BF16 + down_type = 30 # ggml_type::GGML_TYPE_BF16 + bytes_per_elem = 2.000000 + elif quant_mode == "q8_0": + gate_type = 8 # ggml_type::GGML_TYPE_Q8_0 + up_type = 8 # ggml_type::GGML_TYPE_Q8_0 + down_type = 8 # ggml_type::GGML_TYPE_Q8_0 + bytes_per_elem = 1.062500 + elif quant_mode == "q6_k": + gate_type = 14 # ggml_type::GGML_TYPE_Q6_K + up_type = 14 # ggml_type::GGML_TYPE_Q6_K + down_type = 14 # ggml_type::GGML_TYPE_Q6_K + bytes_per_elem = 0.820312 + elif quant_mode == "q5_k_m": + gate_type = 13 # ggml_type::GGML_TYPE_Q5_K + up_type = 13 # ggml_type::GGML_TYPE_Q5_K + down_type = 14 # ggml_type::GGML_TYPE_Q6_K + bytes_per_elem = 0.731771 + elif quant_mode == "q4_k_m": + gate_type = 12 # ggml_type::GGML_TYPE_Q4_K + up_type = 12 # ggml_type::GGML_TYPE_Q4_K + down_type = 14 # ggml_type::GGML_TYPE_Q6_K + bytes_per_elem = 0.648437 + elif quant_mode == "q3_k_m": + gate_type = 11 # ggml_type::GGML_TYPE_Q3_K + up_type = 11 # ggml_type::GGML_TYPE_Q3_K + down_type = 13 # ggml_type::GGML_TYPE_Q5_K + bytes_per_elem = 0.515625 + elif quant_mode == "q2_k": + gate_type = 10 # ggml_type::GGML_TYPE_Q2_K + up_type = 10 # ggml_type::GGML_TYPE_Q2_K + down_type = 11 # ggml_type::GGML_TYPE_Q3_K + bytes_per_elem = 0.328125 + elif quant_mode == "iq3_xs": + gate_type = 21 # ggml_type::GGML_TYPE_IQ3_S + up_type = 21 # ggml_type::GGML_TYPE_IQ3_S + down_type = 21 # ggml_type::GGML_TYPE_IQ3_S + bytes_per_elem = 0.429688 + elif quant_mode == "iq2_xxs": + gate_type = 16 # ggml_type::GGML_TYPE_IQ2_XXS + up_type = 16 # ggml_type::GGML_TYPE_IQ2_XXS + down_type = 16 # ggml_type::GGML_TYPE_IQ2_XXS + bytes_per_elem = 0.257812 + else: + assert(False) + + + mlps = [] + gate_projs = [] + up_projs = [] + down_projs = [] + for _ in range(layer_num): + gate_proj = torch.randn((intermediate_size, hidden_size), dtype=torch.float32, device = "cuda").to("cpu").contiguous() + up_proj = torch.randn((intermediate_size, hidden_size), dtype=torch.float32, device = "cuda").to("cpu").contiguous() + down_proj = torch.randn((hidden_size, intermediate_size), dtype=torch.float32, device = "cuda").to("cpu").contiguous() + config = cpuinfer_ext.mlp.MLPConfig(hidden_size, intermediate_size, stride, gate_proj.data_ptr(), up_proj.data_ptr(), down_proj.data_ptr(), gate_type, up_type, down_type, hidden_type) + mlp = cpuinfer_ext.mlp.MLP(config) + gate_projs.append(gate_proj) + up_projs.append(up_proj) + down_projs.append(down_proj) + mlps.append(mlp) + + # warm up + for i in range(warm_up_iter): + mlp = mlps[i % layer_num] + input = torch.randn((1, hidden_size), dtype=torch.bfloat16).contiguous() + output = torch.empty((1, hidden_size), dtype=torch.bfloat16).contiguous() + 
CPUInfer.submit(mlp.forward, input.data_ptr(), output.data_ptr()) + CPUInfer.sync() + + # test + total_time = 0 + for i in range(test_iter): + mlp = mlps[i % layer_num] + input = torch.randn((1, hidden_size), dtype=torch.bfloat16).contiguous() + output = torch.empty((1, hidden_size), dtype=torch.bfloat16).contiguous() + start = time.perf_counter() + CPUInfer.submit(mlp.forward, input.data_ptr(), output.data_ptr()) + CPUInfer.sync() + end = time.perf_counter() + total_time += end - start + print('Quant mode: ', quant_mode) + print('Time(s): ', total_time) + print('Iteration: ', test_iter) + print('Time(us) per iteration: ', total_time / test_iter * 1000000) + print('Bandwidth: ', hidden_size * intermediate_size * 3 * bytes_per_elem * test_iter / total_time / 1000 / 1000 / 1000, 'GB/s') + print('') + +bench_mlp("fp32") +bench_mlp("fp16") +bench_mlp("bf16") +bench_mlp("q8_0") +bench_mlp("q6_k") +bench_mlp("q5_k_m") +bench_mlp("q4_k_m") +bench_mlp("q3_k_m") +bench_mlp("q2_k") +# Not supported on __x86_64__ +# bench_linear("iq3_xs") +# bench_linear("iq2_xxs") diff --git a/ktransformers/ktransformers_ext/bench/bench_mlp_torch.py b/ktransformers/ktransformers_ext/bench/bench_mlp_torch.py new file mode 100644 index 0000000..3aad58c --- /dev/null +++ b/ktransformers/ktransformers_ext/bench/bench_mlp_torch.py @@ -0,0 +1,130 @@ +#!/usr/bin/env python +# coding=utf-8 +''' +Description : +Author : chenht2022 +Date : 2024-07-16 10:43:18 +Version : 1.0.0 +LastEditors : chenht2022 +LastEditTime : 2024-07-25 10:32:53 +Copyright (c) 2024 by KVCache.AI, All Rights Reserved. +''' +import os, sys +import time +import torch +import torch.nn.quantized as nnq + +def act_fn(x): + return x / (1.0 + torch.exp(-x)) + +def bench_mlp(quant_mode: str): + with torch.inference_mode(mode=True): + hidden_size = 5120 + intermediate_size = 3072 + layer_num = 10 + warm_up_iter = 1000 + test_iter = 10000 + + if quant_mode == "fp32": + proj_type = torch.float32 + bytes_per_elem = 4.000000 + elif quant_mode == "fp16": + proj_type = torch.float16 + bytes_per_elem = 2.000000 + elif quant_mode == "bf16": + proj_type = torch.bfloat16 + bytes_per_elem = 2.000000 + elif quant_mode == "qint8": + proj_type = torch.qint8 + bytes_per_elem = 1.000000 + else: + assert(False) + + gate_projs = [] + up_projs = [] + down_projs = [] + for _ in range(layer_num): + gate_proj = torch.randn((intermediate_size, hidden_size), dtype=torch.float32, device = "cuda").to("cpu").contiguous() + up_proj = torch.randn((intermediate_size, hidden_size), dtype=torch.float32, device = "cuda").to("cpu").contiguous() + down_proj = torch.randn((hidden_size, intermediate_size), dtype=torch.float32, device = "cuda").to("cpu").contiguous() + if quant_mode == "qint8": + scale, zero_point = 0.1, 0 # Adjust scale and zero_point based on your dataset + gate_proj_q = torch.quantize_per_tensor(gate_proj, scale, zero_point, torch.qint8) + quantized_gate = nnq.Linear(hidden_size, intermediate_size) + quantized_gate.set_weight_bias(gate_proj_q, None) + up_proj_q = torch.quantize_per_tensor(up_proj, scale, zero_point, torch.qint8) + quantized_up = nnq.Linear(hidden_size, intermediate_size) + quantized_up.set_weight_bias(up_proj_q, None) + down_proj_q = torch.quantize_per_tensor(down_proj, scale, zero_point, torch.qint8) + quantized_down = nnq.Linear(intermediate_size, hidden_size) + quantized_down.set_weight_bias(down_proj_q, None) + gate_projs.append(quantized_gate) + up_projs.append(quantized_up) + down_projs.append(quantized_down) + else: + 
gate_projs.append(gate_proj.to(proj_type))
+                up_projs.append(up_proj.to(proj_type))
+                down_projs.append(down_proj.to(proj_type))
+
+        # warm up
+        for i in range(warm_up_iter):
+            input = torch.randn((1, hidden_size), dtype=torch.float32).contiguous()
+            if quant_mode == "qint8":
+                input_q = torch.quantize_per_tensor(input, scale, zero_point, torch.quint8)
+                quantized_gate = gate_projs[i % layer_num]
+                gate_buf = quantized_gate(input_q)
+                quantized_up = up_projs[i % layer_num]
+                up_buf = quantized_up(input_q)
+                gate_buf = gate_buf.dequantize()
+                up_buf = up_buf.dequantize()
+                intermediate = act_fn(gate_buf) * up_buf
+                intermediate_q = torch.quantize_per_tensor(intermediate, scale, zero_point, torch.quint8)
+                quantized_down = down_projs[i % layer_num]
+                t_output = quantized_down(intermediate_q)
+            else:
+                gate_proj = gate_projs[i%layer_num]
+                up_proj = up_projs[i%layer_num]
+                down_proj = down_projs[i%layer_num]
+                gate_buf = torch.mm(input.to(proj_type), gate_proj.t())
+                up_buf = torch.mm(input.to(proj_type), up_proj.t())
+                intermediate = act_fn(gate_buf) * up_buf
+                t_output = torch.mm(intermediate.to(proj_type), down_proj.t())
+
+        # test
+        total_time = 0
+        for i in range(test_iter):
+            input = torch.randn((1, hidden_size), dtype=torch.float32).contiguous()
+            start = time.perf_counter()
+            if quant_mode == "qint8":
+                input_q = torch.quantize_per_tensor(input, scale, zero_point, torch.quint8)
+                quantized_gate = gate_projs[i % layer_num]
+                gate_buf = quantized_gate(input_q)
+                quantized_up = up_projs[i % layer_num]
+                up_buf = quantized_up(input_q)
+                gate_buf = gate_buf.dequantize()
+                up_buf = up_buf.dequantize()
+                intermediate = act_fn(gate_buf) * up_buf
+                intermediate_q = torch.quantize_per_tensor(intermediate, scale, zero_point, torch.quint8)
+                quantized_down = down_projs[i % layer_num]
+                t_output = quantized_down(intermediate_q)
+            else:
+                gate_proj = gate_projs[i%layer_num]
+                up_proj = up_projs[i%layer_num]
+                down_proj = down_projs[i%layer_num]
+                gate_buf = torch.mm(input.to(proj_type), gate_proj.t())
+                up_buf = torch.mm(input.to(proj_type), up_proj.t())
+                intermediate = act_fn(gate_buf) * up_buf
+                t_output = torch.mm(intermediate.to(proj_type), down_proj.t())
+            end = time.perf_counter()
+            total_time += end - start
+        print('Quant mode: ', quant_mode)
+        print('Time(s): ', total_time)
+        print('Iteration: ', test_iter)
+        print('Time(us) per iteration: ', total_time / test_iter * 1000000)
+        print('Bandwidth: ', hidden_size * intermediate_size * 3 * bytes_per_elem * test_iter / total_time / 1000 / 1000 / 1000, 'GB/s')
+        print('')
+
+bench_mlp("fp32")
+bench_mlp("fp16")
+bench_mlp("bf16")
+bench_mlp("qint8")
diff --git a/ktransformers/ktransformers_ext/bench/bench_moe.py b/ktransformers/ktransformers_ext/bench/bench_moe.py
new file mode 100644
index 0000000..909f029
--- /dev/null
+++ b/ktransformers/ktransformers_ext/bench/bench_moe.py
@@ -0,0 +1,154 @@
+#!/usr/bin/env python
+# coding=utf-8
+'''
+Description  :  
+Author       : chenht2022
+Date         : 2024-07-25 10:32:05
+Version      : 1.0.0
+LastEditors  : chenht2022 
+LastEditTime : 2024-07-25 10:33:00
+Copyright (c) 2024 by KVCache.AI, All Rights Reserved. 
+''' +import os, sys +import time +sys.path.append(os.path.dirname(__file__) + '/../build') +import cpuinfer_ext +import torch + +def bench_moe(quant_mode: str): + with torch.inference_mode(mode=True): + expert_num = 10 + hidden_size = 5120 + intermediate_size = 1536 + stride = 16 + group_min_len = 10 + group_max_len = 1024 + n_routed_experts = 6 + layer_num = 10 + qlen = 1 + CPUInfer = cpuinfer_ext.CPUInfer(64) + warm_up_iter = 1000 + test_iter = 10000 + + hidden_type = 30 # ggml_type::GGML_TYPE_BF16 + if quant_mode == "fp32": + gate_type = 0 # ggml_type::GGML_TYPE_F32 + up_type = 0 # ggml_type::GGML_TYPE_F32 + down_type = 0 # ggml_type::GGML_TYPE_F32 + bytes_per_elem = 4.000000 + elif quant_mode == "fp16": + gate_type = 1 # ggml_type::GGML_TYPE_F16 + up_type = 1 # ggml_type::GGML_TYPE_F16 + down_type = 1 # ggml_type::GGML_TYPE_F16 + bytes_per_elem = 2.000000 + elif quant_mode == "bf16": + gate_type = 30 # ggml_type::GGML_TYPE_BF16 + up_type = 30 # ggml_type::GGML_TYPE_BF16 + down_type = 30 # ggml_type::GGML_TYPE_BF16 + bytes_per_elem = 2.000000 + elif quant_mode == "q8_0": + gate_type = 8 # ggml_type::GGML_TYPE_Q8_0 + up_type = 8 # ggml_type::GGML_TYPE_Q8_0 + down_type = 8 # ggml_type::GGML_TYPE_Q8_0 + bytes_per_elem = 1.062500 + elif quant_mode == "q6_k": + gate_type = 14 # ggml_type::GGML_TYPE_Q6_K + up_type = 14 # ggml_type::GGML_TYPE_Q6_K + down_type = 14 # ggml_type::GGML_TYPE_Q6_K + bytes_per_elem = 0.820312 + elif quant_mode == "q5_k_m": + gate_type = 13 # ggml_type::GGML_TYPE_Q5_K + up_type = 13 # ggml_type::GGML_TYPE_Q5_K + down_type = 14 # ggml_type::GGML_TYPE_Q6_K + bytes_per_elem = 0.731771 + elif quant_mode == "q4_k_m": + gate_type = 12 # ggml_type::GGML_TYPE_Q4_K + up_type = 12 # ggml_type::GGML_TYPE_Q4_K + down_type = 14 # ggml_type::GGML_TYPE_Q6_K + bytes_per_elem = 0.648437 + elif quant_mode == "q3_k_m": + gate_type = 11 # ggml_type::GGML_TYPE_Q3_K + up_type = 11 # ggml_type::GGML_TYPE_Q3_K + down_type = 13 # ggml_type::GGML_TYPE_Q5_K + bytes_per_elem = 0.515625 + elif quant_mode == "q2_k": + gate_type = 10 # ggml_type::GGML_TYPE_Q2_K + up_type = 10 # ggml_type::GGML_TYPE_Q2_K + down_type = 11 # ggml_type::GGML_TYPE_Q3_K + bytes_per_elem = 0.328125 + elif quant_mode == "iq3_xs": + gate_type = 21 # ggml_type::GGML_TYPE_IQ3_S + up_type = 21 # ggml_type::GGML_TYPE_IQ3_S + down_type = 21 # ggml_type::GGML_TYPE_IQ3_S + bytes_per_elem = 0.429688 + elif quant_mode == "iq2_xxs": + gate_type = 16 # ggml_type::GGML_TYPE_IQ2_XXS + up_type = 16 # ggml_type::GGML_TYPE_IQ2_XXS + down_type = 16 # ggml_type::GGML_TYPE_IQ2_XXS + bytes_per_elem = 0.257812 + else: + assert(False) + + + moes = [] + gate_projs = [] + up_projs = [] + down_projs = [] + for _ in range(layer_num): + gate_proj = torch.randn((expert_num, intermediate_size, hidden_size), dtype=torch.float32, device = "cuda").to("cpu").contiguous() + up_proj = torch.randn((expert_num, intermediate_size, hidden_size), dtype=torch.float32, device = "cuda").to("cpu").contiguous() + down_proj = torch.randn((expert_num, hidden_size, intermediate_size), dtype=torch.float32, device = "cuda").to("cpu").contiguous() + config = cpuinfer_ext.moe.MOEConfig(expert_num, n_routed_experts, hidden_size, intermediate_size, stride, group_min_len, group_max_len, gate_proj.data_ptr(), up_proj.data_ptr(), down_proj.data_ptr(), gate_type, up_type, down_type, hidden_type) + moe = cpuinfer_ext.moe.MOE(config) + gate_projs.append(gate_proj) + up_projs.append(up_proj) + down_projs.append(down_proj) + moes.append(moe) + expert_ids = torch.randint(0, 
expert_num, (layer_num, qlen, n_routed_experts), dtype=torch.int64, device = "cuda").to("cpu").contiguous() + weights = torch.rand((layer_num, qlen, n_routed_experts), dtype=torch.float32, device = "cuda").to("cpu").contiguous() + input = torch.randn((layer_num, qlen, hidden_size), dtype=torch.bfloat16, device = "cuda").to("cpu").contiguous() + output = torch.empty((layer_num, qlen, hidden_size), dtype=torch.bfloat16, device = "cuda").to("cpu").contiguous() + + # warm up + for i in range(warm_up_iter): + CPUInfer.submit(moes[i % layer_num].forward, + qlen, + n_routed_experts, + expert_ids[i % layer_num].data_ptr(), + weights[i % layer_num].data_ptr(), + input[i % layer_num].data_ptr(), + output[i % layer_num].data_ptr()) + CPUInfer.sync() + + # test + start = time.perf_counter() + for i in range(test_iter): + CPUInfer.submit(moes[i % layer_num].forward, + qlen, + n_routed_experts, + expert_ids[i % layer_num].data_ptr(), + weights[i % layer_num].data_ptr(), + input[i % layer_num].data_ptr(), + output[i % layer_num].data_ptr()) + CPUInfer.sync() + end = time.perf_counter() + total_time = end - start + print('Quant mode: ', quant_mode) + print('Time(s): ', total_time) + print('Iteration: ', test_iter) + print('Time(us) per iteration: ', total_time / test_iter * 1000000) + print('Bandwidth: ', hidden_size * intermediate_size * 3 * n_routed_experts * bytes_per_elem * test_iter / total_time / 1000 / 1000 / 1000, 'GB/s') + print('') + +bench_moe("fp32") +bench_moe("fp16") +bench_moe("bf16") +bench_moe("q8_0") +bench_moe("q6_k") +bench_moe("q5_k_m") +bench_moe("q4_k_m") +bench_moe("q3_k_m") +bench_moe("q2_k") +# Not supported on __x86_64__ +# bench_linear("iq3_xs") +# bench_linear("iq2_xxs") diff --git a/ktransformers/ktransformers_ext/bench/bench_moe_torch.py b/ktransformers/ktransformers_ext/bench/bench_moe_torch.py new file mode 100644 index 0000000..5075636 --- /dev/null +++ b/ktransformers/ktransformers_ext/bench/bench_moe_torch.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python +# coding=utf-8 +''' +Description : +Author : chenht2022 +Date : 2024-07-25 10:32:05 +Version : 1.0.0 +LastEditors : chenht2022 +LastEditTime : 2024-07-25 10:32:57 +Copyright (c) 2024 by KVCache.AI, All Rights Reserved. 
+''' +import os, sys +import time +import torch +import torch.nn.quantized as nnq + +def act_fn(x): + return x / (1.0 + torch.exp(-x)) + +def bench_moe(quant_mode: str): + with torch.inference_mode(mode=True): + expert_num = 10 + hidden_size = 5120 + intermediate_size = 1536 + n_routed_experts = 6 + layer_num = 10 + warm_up_iter = 1000 + test_iter = 10000 + + if quant_mode == "fp32": + proj_type = torch.float32 + bytes_per_elem = 4.000000 + elif quant_mode == "fp16": + proj_type = torch.float16 + bytes_per_elem = 2.000000 + elif quant_mode == "bf16": + proj_type = torch.bfloat16 + bytes_per_elem = 2.000000 + elif quant_mode == "qint8": + proj_type = torch.qint8 + bytes_per_elem = 1.000000 + else: + assert(False) + + gate_projs = [] + up_projs = [] + down_projs = [] + for _ in range(layer_num): + gate_proj = torch.randn((expert_num, intermediate_size, hidden_size), dtype=torch.float32, device = "cuda").to("cpu").contiguous() + up_proj = torch.randn((expert_num, intermediate_size, hidden_size), dtype=torch.float32, device = "cuda").to("cpu").contiguous() + down_proj = torch.randn((expert_num, hidden_size, intermediate_size), dtype=torch.float32, device = "cuda").to("cpu").contiguous() + if quant_mode == "qint8": + scale, zero_point = 0.1, 0 # Adjust scale and zero_point based on your dataset + quantized_gate_proj = [] + quantized_up_proj = [] + quantized_down_proj = [] + for i in range(expert_num): + gate_proj_q = torch.quantize_per_tensor(gate_proj[i], scale, zero_point, torch.qint8) + quantized_gate = nnq.Linear(hidden_size, intermediate_size) + quantized_gate.set_weight_bias(gate_proj_q, None) + quantized_gate_proj.append(quantized_gate) + up_proj_q = torch.quantize_per_tensor(up_proj[i], scale, zero_point, torch.qint8) + quantized_up = nnq.Linear(hidden_size, intermediate_size) + quantized_up.set_weight_bias(up_proj_q, None) + quantized_up_proj.append(quantized_up) + down_proj_q = torch.quantize_per_tensor(down_proj[i], scale, zero_point, torch.qint8) + quantized_down = nnq.Linear(intermediate_size, hidden_size) + quantized_down.set_weight_bias(down_proj_q, None) + quantized_down_proj.append(quantized_down) + gate_projs.append(quantized_gate_proj) + up_projs.append(quantized_up_proj) + down_projs.append(quantized_down_proj) + else: + gate_projs.append(gate_proj.to(proj_type)) + up_projs.append(up_proj.to(proj_type)) + down_projs.append(down_proj.to(proj_type)) + + # warm up + for i in range(warm_up_iter): + expert_ids = torch.randint(0, expert_num, (n_routed_experts,), dtype=torch.int64).contiguous() + weights = torch.rand((n_routed_experts,), dtype=torch.float32).contiguous() + input = torch.randn((1, hidden_size), dtype=torch.float32).contiguous() + if quant_mode == "qint8": + input_q = torch.quantize_per_tensor(input, scale, zero_point, torch.quint8) + t_output = torch.zeros((1, hidden_size), dtype=torch.float32).contiguous() + gate_proj = gate_projs[i%layer_num] + up_proj = up_projs[i%layer_num] + down_proj = down_projs[i%layer_num] + for i, expert_id in enumerate(expert_ids): + quantized_gate = gate_proj[expert_id] + gate_buf = quantized_gate(input_q) + quantized_up = up_proj[expert_id] + up_buf = quantized_up(input_q) + gate_buf = gate_buf.dequantize() + up_buf = up_buf.dequantize() + intermediate = act_fn(gate_buf) * up_buf + intermediate_q = torch.quantize_per_tensor(intermediate, scale, zero_point, torch.quint8) + quantized_down = down_proj[expert_id] + expert_output = quantized_down(intermediate_q) + expert_output = expert_output.dequantize() + t_output += weights[i] * 
expert_output + else: + t_output = torch.zeros((1, hidden_size), dtype=proj_type).contiguous() + gate_proj = gate_projs[i%layer_num] + up_proj = up_projs[i%layer_num] + down_proj = down_projs[i%layer_num] + for i, expert_id in enumerate(expert_ids): + gate_buf = torch.mm(input.to(proj_type), gate_proj[expert_id].t()) + up_buf = torch.mm(input.to(proj_type), up_proj[expert_id].t()) + intermediate = act_fn(gate_buf) * up_buf + expert_output = torch.mm(intermediate.to(proj_type), down_proj[expert_id].t()) + t_output += weights[i] * expert_output + + # test + total_time = 0 + for i in range(test_iter): + expert_ids = torch.randint(0, expert_num, (n_routed_experts,), dtype=torch.int64).contiguous() + weights = torch.rand((n_routed_experts,), dtype=torch.float32).contiguous() + input = torch.randn((1, hidden_size), dtype=torch.float32).contiguous() + start = time.perf_counter() + if quant_mode == "qint8": + input_q = torch.quantize_per_tensor(input, scale, zero_point, torch.quint8) + t_output = torch.zeros((1, hidden_size), dtype=torch.float32).contiguous() + gate_proj = gate_projs[i%layer_num] + up_proj = up_projs[i%layer_num] + down_proj = down_projs[i%layer_num] + for i, expert_id in enumerate(expert_ids): + quantized_gate = gate_proj[expert_id] + gate_buf = quantized_gate(input_q) + quantized_up = up_proj[expert_id] + up_buf = quantized_up(input_q) + gate_buf = gate_buf.dequantize() + up_buf = up_buf.dequantize() + intermediate = act_fn(gate_buf) * up_buf + intermediate_q = torch.quantize_per_tensor(intermediate, scale, zero_point, torch.quint8) + quantized_down = down_proj[expert_id] + expert_output = quantized_down(intermediate_q) + expert_output = expert_output.dequantize() + t_output += weights[i] * expert_output + else: + t_output = torch.zeros((1, hidden_size), dtype=proj_type).contiguous() + gate_proj = gate_projs[i%layer_num] + up_proj = up_projs[i%layer_num] + down_proj = down_projs[i%layer_num] + for i, expert_id in enumerate(expert_ids): + gate_buf = torch.mm(input.to(proj_type), gate_proj[expert_id].t()) + up_buf = torch.mm(input.to(proj_type), up_proj[expert_id].t()) + intermediate = act_fn(gate_buf) * up_buf + expert_output = torch.mm(intermediate.to(proj_type), down_proj[expert_id].t()) + t_output += weights[i] * expert_output + end = time.perf_counter() + total_time += end - start + print('Quant mode: ', quant_mode) + print('Time(s): ', total_time) + print('Iteration: ', test_iter) + print('Time(us) per iteration: ', total_time / test_iter * 1000000) + print('Bandwidth: ', hidden_size * intermediate_size * 3 * n_routed_experts * bytes_per_elem * test_iter / total_time / 1000 / 1000 / 1000, 'GB/s') + print('') + +bench_moe("fp32") +bench_moe("fp16") +bench_moe("bf16") +bench_moe("qint8") diff --git a/ktransformers/ktransformers_ext/cpu_backend/backend.cpp b/ktransformers/ktransformers_ext/cpu_backend/backend.cpp new file mode 100644 index 0000000..5707505 --- /dev/null +++ b/ktransformers/ktransformers_ext/cpu_backend/backend.cpp @@ -0,0 +1,100 @@ +/** + * @Description : + * @Author : chenht2022 + * @Date : 2024-07-22 02:03:05 + * @Version : 1.0.0 + * @LastEditors : chenht2022 + * @LastEditTime : 2024-07-25 10:33:34 + * @Copyright (c) 2024 by KVCache.AI, All Rights Reserved. 
+ **/
+#include "backend.h"
+
+Backend::Backend(int thread_num) {
+    thread_num_ = thread_num;
+    thread_state_.resize(thread_num);
+    for (int i = 0; i < thread_num; i++) {
+        thread_state_[i].curr = std::make_unique<std::atomic<int>>();
+        thread_state_[i].status = std::make_unique<std::atomic<ThreadStatus>>(ThreadStatus::WAITING);
+    }
+    workers_.resize(thread_num);
+    for (int i = 1; i < thread_num; i++) {
+        workers_[i] = std::thread(&Backend::worker_thread, this, i);
+    }
+}
+
+Backend::~Backend() {
+    for (int i = 0; i < thread_num_; i++) {
+        thread_state_[i].status->store(ThreadStatus::EXIT, std::memory_order_release);
+    }
+    for (int i = 1; i < thread_num_; i++) {
+        if (workers_[i].joinable()) {
+            workers_[i].join();
+        }
+    }
+}
+
+int Backend::get_thread_num() {
+    return thread_num_;
+}
+
+void Backend::do_work_stealing_job(int task_num, std::function<void(int)> func) {
+    func_ = func;
+    int base = task_num / thread_num_;
+    int remain = task_num % thread_num_;
+    thread_state_[0].end = base + (0 < remain);
+    for (int i = 1; i < thread_num_; i++) {
+        thread_state_[i].curr->store(thread_state_[i - 1].end, std::memory_order_relaxed);
+        thread_state_[i].end = thread_state_[i - 1].end + base + (i < remain);
+        thread_state_[i].status->store(ThreadStatus::WORKING, std::memory_order_release);
+    }
+    thread_state_[0].curr->store(0, std::memory_order_relaxed);
+    thread_state_[0].status->store(ThreadStatus::WORKING, std::memory_order_release);
+    process_tasks(0);
+    for (int i = 1; i < thread_num_; i++) {
+        while (thread_state_[i].status->load(std::memory_order_acquire) == ThreadStatus::WORKING) {
+        }
+    }
+}
+
+void Backend::process_tasks(int thread_id) {
+    while (true) {
+        int task_id = thread_state_[thread_id].curr->fetch_add(1, std::memory_order_acq_rel);
+        if (task_id >= thread_state_[thread_id].end) {
+            break;
+        }
+        func_(task_id);
+    }
+    for (int t_offset = 1; t_offset < thread_num_; t_offset++) {
+        int t_i = (thread_id + t_offset) % thread_num_;
+        if (thread_state_[t_i].status->load(std::memory_order_acquire) != ThreadStatus::WORKING) {
+            continue;
+        }
+        while (true) {
+            int task_id = thread_state_[t_i].curr->fetch_add(1, std::memory_order_acq_rel);
+            if (task_id >= thread_state_[t_i].end) {
+                break;
+            }
+            func_(task_id);
+        }
+    }
+    thread_state_[thread_id].status->store(ThreadStatus::WAITING, std::memory_order_release);
+}
+
+void Backend::worker_thread(int thread_id) {
+    auto start = std::chrono::steady_clock::now();
+    while (true) {
+        ThreadStatus status = thread_state_[thread_id].status->load(std::memory_order_acquire);
+        if (status == ThreadStatus::WORKING) {
+            process_tasks(thread_id);
+            start = std::chrono::steady_clock::now();
+        } else if (status == ThreadStatus::WAITING) {
+            auto now = std::chrono::steady_clock::now();
+            auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now - start).count();
+            if (duration > 50) {
+                std::this_thread::sleep_for(std::chrono::milliseconds(1));
+            }
+        } else if (status == ThreadStatus::EXIT) {
+            return;
+        }
+    }
+}
\ No newline at end of file
diff --git a/ktransformers/ktransformers_ext/cpu_backend/backend.h b/ktransformers/ktransformers_ext/cpu_backend/backend.h
new file mode 100644
index 0000000..be3d45b
--- /dev/null
+++ b/ktransformers/ktransformers_ext/cpu_backend/backend.h
@@ -0,0 +1,50 @@
+/**
+ * @Description  :  
+ * @Author       : chenht2022
+ * @Date         : 2024-07-22 02:03:05
+ * @Version      : 1.0.0
+ * @LastEditors  : chenht2022 
+ * @LastEditTime : 2024-07-25 10:33:38
+ * @Copyright (c) 2024 by KVCache.AI, All Rights Reserved. 
+ **/
+#ifndef CPUINFER_BACKEND_H
+#define CPUINFER_BACKEND_H
+
+#include <atomic>
+#include <chrono>
+#include <condition_variable>
+#include <functional>
+#include <memory>
+#include <thread>
+#include <vector>
+
+enum ThreadStatus {
+    WORKING,
+    WAITING,
+    EXIT,
+};
+
+struct ThreadState {
+    std::unique_ptr<std::atomic<ThreadStatus>> status;
+    std::unique_ptr<std::atomic<int>> curr;
+    int end;
+};
+
+class Backend {
+ public:
+    Backend(int);
+    ~Backend();
+    int get_thread_num();
+    void do_work_stealing_job(int, std::function<void(int)>);
+
+ private:
+    int thread_num_;
+    std::vector<ThreadState> thread_state_;  // [thread_num]
+    std::function<void(int)> func_;
+    std::vector<std::thread> workers_;
+
+    void process_tasks(int);
+    void worker_thread(int);
+};
+
+#endif
\ No newline at end of file
diff --git a/ktransformers/ktransformers_ext/cpu_backend/cpuinfer.h b/ktransformers/ktransformers_ext/cpu_backend/cpuinfer.h
new file mode 100644
index 0000000..eae6f90
--- /dev/null
+++ b/ktransformers/ktransformers_ext/cpu_backend/cpuinfer.h
@@ -0,0 +1,57 @@
+/**
+ * @Description  :  
+ * @Author       : chenht2022
+ * @Date         : 2024-07-16 10:43:18
+ * @Version      : 1.0.0
+ * @LastEditors  : chenht2022 
+ * @LastEditTime : 2024-07-25 10:33:42
+ * @Copyright (c) 2024 by KVCache.AI, All Rights Reserved. 
+**/
+#ifndef CPUINFER_CPUINFER_H
+#define CPUINFER_CPUINFER_H
+
+#include <atomic>
+#include <condition_variable>
+#include <functional>
+#include <mutex>
+#include <queue>
+#include <thread>
+#include <vector>
+
+#include "backend.h"
+#include "task_queue.h"
+
+#include "llama.cpp/ggml-impl.h"
+
+class CPUInfer {
+ public:
+    CPUInfer(int thread_num) {
+        backend_ = new Backend(thread_num - 1);
+        task_queue_ = new TaskQueue();
+        for (int i = 0; i < (1 << 16); ++i) {
+            ggml_table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(i);
+        }
+    }
+
+    ~CPUInfer() {
+        delete backend_;
+        delete task_queue_;
+    }
+
+    template <typename Func, typename Obj, typename... Args>
+    void submit(Func f, Obj* obj, Args... args) {
+        task_queue_->enqueue([=]() {
+            std::invoke(f, *obj, args..., backend_);
+        });
+    }
+
+    void sync() {
+        task_queue_->sync();
+    }
+
+ public:
+    Backend* backend_;
+    TaskQueue* task_queue_;
+};
+
+#endif
\ No newline at end of file
diff --git a/ktransformers/ktransformers_ext/cpu_backend/task_queue.cpp b/ktransformers/ktransformers_ext/cpu_backend/task_queue.cpp
new file mode 100644
index 0000000..0ca865b
--- /dev/null
+++ b/ktransformers/ktransformers_ext/cpu_backend/task_queue.cpp
@@ -0,0 +1,57 @@
+/**
+ * @Description  :  
+ * @Author       : chenht2022
+ * @Date         : 2024-07-17 12:25:51
+ * @Version      : 1.0.0
+ * @LastEditors  : chenht2022 
+ * @LastEditTime : 2024-07-25 10:33:44
+ * @Copyright (c) 2024 by KVCache.AI, All Rights Reserved. 
diff --git a/ktransformers/ktransformers_ext/cpu_backend/task_queue.cpp b/ktransformers/ktransformers_ext/cpu_backend/task_queue.cpp
new file mode 100644
index 0000000..0ca865b
--- /dev/null
+++ b/ktransformers/ktransformers_ext/cpu_backend/task_queue.cpp
@@ -0,0 +1,57 @@
+/**
+ * @Description :
+ * @Author : chenht2022
+ * @Date : 2024-07-17 12:25:51
+ * @Version : 1.0.0
+ * @LastEditors : chenht2022
+ * @LastEditTime : 2024-07-25 10:33:44
+ * @Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
+ **/
+#include "task_queue.h"
+
+TaskQueue::TaskQueue() {
+    // Initialize the flags before the worker starts polling them.
+    sync_flag.store(true, std::memory_order_seq_cst);
+    exit_flag.store(false, std::memory_order_seq_cst);
+    worker = std::thread(&TaskQueue::processTasks, this);
+}
+
+TaskQueue::~TaskQueue() {
+    exit_flag.store(true, std::memory_order_seq_cst);
+    if (worker.joinable()) {
+        worker.join();
+    }
+}
+
+void TaskQueue::enqueue(std::function<void()> task) {
+    mutex.lock();
+    tasks.push(task);
+    sync_flag.store(false, std::memory_order_seq_cst);
+    mutex.unlock();
+}
+
+void TaskQueue::sync() {
+    // Busy-wait until the worker has drained the queue.
+    while (!sync_flag.load(std::memory_order_seq_cst))
+        ;
+}
+
+void TaskQueue::processTasks() {
+    while (true) {
+        mutex.lock();
+        if (tasks.empty()) {
+            if (exit_flag.load(std::memory_order_seq_cst)) {
+                mutex.unlock();  // release the lock before exiting the worker
+                return;
+            }
+            mutex.unlock();
+            continue;
+        }
+        std::function<void()> task = tasks.front();
+        mutex.unlock();
+        task();
+        mutex.lock();
+        tasks.pop();
+        if (tasks.empty()) {
+            sync_flag.store(true, std::memory_order_seq_cst);
+        }
+        mutex.unlock();
+    }
+}
diff --git a/ktransformers/ktransformers_ext/cpu_backend/task_queue.h b/ktransformers/ktransformers_ext/cpu_backend/task_queue.h
new file mode 100644
index 0000000..b4212fd
--- /dev/null
+++ b/ktransformers/ktransformers_ext/cpu_backend/task_queue.h
@@ -0,0 +1,39 @@
+/**
+ * @Description :
+ * @Author : chenht2022
+ * @Date : 2024-07-16 10:43:18
+ * @Version : 1.0.0
+ * @LastEditors : chenht2022
+ * @LastEditTime : 2024-07-25 10:33:47
+ * @Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
+ **/
+#ifndef CPUINFER_TASKQUEUE_H
+#define CPUINFER_TASKQUEUE_H
+
+#include <atomic>
+#include <chrono>
+#include <condition_variable>
+#include <functional>
+#include <mutex>
+#include <queue>
+#include <thread>
+
+class TaskQueue {
+   public:
+    TaskQueue();
+    ~TaskQueue();
+
+    void enqueue(std::function<void()>);
+
+    void sync();
+
+   private:
+    void processTasks();
+
+    std::queue<std::function<void()>> tasks;
+    std::thread worker;
+    std::mutex mutex;
+    std::atomic<bool> sync_flag;
+    std::atomic<bool> exit_flag;
+};
+#endif
\ No newline at end of file
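A small ordering demo for the queue above (hypothetical, for illustration only): tasks execute FIFO on the single worker thread, and sync() spins until the queue is fully drained.

    #include "task_queue.h"

    int main() {
        TaskQueue q;
        int x = 0;
        q.enqueue([&] { x += 1; });   // runs first
        q.enqueue([&] { x *= 10; });  // runs second
        q.sync();                     // busy-waits; afterwards x == 10
        return 0;
    }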
diff --git a/ktransformers/ktransformers_ext/cuda/binding.cpp b/ktransformers/ktransformers_ext/cuda/binding.cpp
new file mode 100644
index 0000000..2d5da68
--- /dev/null
+++ b/ktransformers/ktransformers_ext/cuda/binding.cpp
@@ -0,0 +1,32 @@
+/**
+ * @Description :
+ * @Author : Azure-Tang
+ * @Date : 2024-07-25 13:38:30
+ * @Version : 1.0.0
+ * @LastEditors : Azure
+ * @LastEditTime : 2024-07-26 08:36:03
+ * @Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
+**/
+
+#include "custom_gguf/ops.h"
+#include "gptq_marlin/ops.h"
+// Python bindings
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <torch/extension.h>
+#include <torch/library.h>
+#include <torch/torch.h>
+// namespace py = pybind11;
+
+PYBIND11_MODULE(KTransformersOps, m) {
+    m.def("dequantize_q8_0", &dequantize_q8_0, "Function to dequantize q8_0 data.",
+          py::arg("data"), py::arg("blk_size"), py::arg("device"));
+    m.def("dequantize_q6_k", &dequantize_q6_k, "Function to dequantize q6_k data.",
+          py::arg("data"), py::arg("blk_size"), py::arg("device"));
+    m.def("dequantize_q4_k", &dequantize_q4_k, "Function to dequantize q4_k data.",
+          py::arg("data"), py::arg("blk_size"), py::arg("device"));
+    m.def("gptq_marlin_gemm", &gptq_marlin_gemm, "Function to perform GEMM using Marlin quantization.",
+          py::arg("a"), py::arg("b_q_weight"), py::arg("b_scales"), py::arg("g_idx"),
+          py::arg("perm"), py::arg("workspace"), py::arg("num_bits"), py::arg("size_m"),
+          py::arg("size_n"), py::arg("size_k"), py::arg("is_k_full"));
+}
diff --git a/ktransformers/ktransformers_ext/cuda/custom_gguf/binding.cpp b/ktransformers/ktransformers_ext/cuda/custom_gguf/binding.cpp
new file mode 100644
index 0000000..ea52e8f
--- /dev/null
+++ b/ktransformers/ktransformers_ext/cuda/custom_gguf/binding.cpp
@@ -0,0 +1,25 @@
+#include "ops.h"
+// Python bindings
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <torch/extension.h>
+#include <torch/library.h>
+#include <torch/torch.h>
+// namespace py = pybind11;
+
+int test(){
+    return 5;
+}
+
+torch::Tensor dequantize_q6_k(torch::Tensor data, int blk_size, torch::Device device);
+
+PYBIND11_MODULE(cudaops, m) {
+    m.def("dequantize_q8_0", &dequantize_q8_0, "Function to dequantize q8_0 data.",
+          py::arg("data"), py::arg("blk_size"), py::arg("device"));
+    m.def("dequantize_q6_k", &dequantize_q6_k, "Function to dequantize q6_k data.",
+          py::arg("data"), py::arg("blk_size"), py::arg("device"));
+    m.def("dequantize_q4_k", &dequantize_q4_k, "Function to dequantize q4_k data.",
+          py::arg("data"), py::arg("blk_size"), py::arg("device"));
+    m.def("test", &test, "Function to test.");
+
+}
diff --git a/ktransformers/ktransformers_ext/cuda/custom_gguf/custom_ggml.h b/ktransformers/ktransformers_ext/cuda/custom_gguf/custom_ggml.h
new file mode 100644
index 0000000..333dc69
--- /dev/null
+++ b/ktransformers/ktransformers_ext/cuda/custom_gguf/custom_ggml.h
@@ -0,0 +1,39 @@
+#include <cuda_fp16.h>
+#include <cstdint>
+#include <cstring>
+
+typedef uint16_t ggml_fp16_t;  // raw fp16 bit pattern, as in ggml
+
+#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
+
+__device__ float ggml_compute_fp16_to_fp32(uint16_t h) {
+    return __uint2float_rd(h);
+}
+
+static inline float ggml_compute_fp16_to_fp32(uint16_t h) {
+    uint16_t tmp;
+    memcpy(&tmp, &h, sizeof(ggml_fp16_t));
+    return (float)tmp;
+}
+
+// define the global table for fp16 to fp32 conversion
+__device__ float ggml_table_f32_f16[1 << 16];
+
+// CUDA Kernel to init the table
+__global__ void init_fp16_to_fp32_table() {
+    int idx = blockIdx.x * blockDim.x + threadIdx.x;
+    for (auto blk_id = idx; blk_id<(1 << 16); blk_id+=blockDim.x * gridDim.x){
+        ggml_table_f32_f16[blk_id] = GGML_COMPUTE_FP16_TO_FP32(blk_id);
+    }
+}
+
+extern __device__ float ggml_table_f32_f16[1 << 16];  // Declare as __device__ if used within device code
+
+// This version of the function is designed to be called from within a CUDA kernel
+#if !defined(GGML_FP16_TO_FP32)
+__device__ float ggml_lookup_fp16_to_fp32(uint16_t f) {
+    return ggml_table_f32_f16[f];
+}
+
+#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x)
+#endif
\ No newline at end of file
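The lookup table above maps every possible 16-bit half pattern to its float value, so a device-side fp16-to-fp32 conversion becomes one memory load. A minimal sketch of how a host program might initialize and then rely on it (launch geometry is arbitrary; any grid that covers 65536 entries works):

    // One-time setup, e.g. at module load; afterwards device code may use
    // GGML_FP16_TO_FP32(bits) where `bits` is a raw fp16 bit pattern.
    init_fp16_to_fp32_table<<<256, 256>>>();
    cudaDeviceSynchronize();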
diff --git a/ktransformers/ktransformers_ext/cuda/custom_gguf/dequant.cu b/ktransformers/ktransformers_ext/cuda/custom_gguf/dequant.cu
new file mode 100644
index 0000000..38f4842
--- /dev/null
+++ b/ktransformers/ktransformers_ext/cuda/custom_gguf/dequant.cu
@@ -0,0 +1,164 @@
+/*
+ * @Description :
+ * @Author : Azure-Tang, Boxin Zhang
+ * @Date : 2024-07-25 13:38:30
+ * @Version : 1.0.0
+ * @LastEditors : Azure
+ * @LastEditTime : 2024-07-26 11:58:50
+ * Adapted from https://github.com/ggerganov/ggml/blob/fca1caafea7de9fbd7efc733b9818f9cf2da3050/src/ggml-quants.c
+ * Copyright (c) 2023-2024 The ggml authors
+ * Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
+ */
+#include <torch/extension.h>
+#include <cuda_runtime.h>
+#include <cuda_fp16.h>
+#include <cstdint>
+
+__global__ void dequantize_q8_0_kernel(float* output, const float* scales, const int8_t* qs, int num_blocks, int blk_size) {
+    int global_idx = blockIdx.x * blockDim.x + threadIdx.x;
+    for (auto block_id=global_idx; block_id<num_blocks; block_id+=blockDim.x * gridDim.x){
+        for (int i = 0; i < blk_size; i++){
+            output[block_id * blk_size + i] = scales[block_id] * qs[block_id * blk_size + i];
+        }
+    }
+}
+
+__device__ void get_scale_min_k4(int j, const uint8_t* q, uint8_t* d, uint8_t* m) {
+    if (j < 4) {
+        *d = q[j] & 63; *m = q[j + 4] & 63;
+    } else {
+        *d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
+        *m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4);
+    }
+}
+
+__global__ void dequantize_q4_k_kernel(int8_t* data, float* output, int blk_size, int num_blocks) {
+    int global_idx = blockIdx.x * blockDim.x + threadIdx.x;
+    for (auto block_id=global_idx; block_id<num_blocks; block_id+=blockDim.x * gridDim.x){
+        float* output_blk = (float*)(output + block_id * 256);
+        const float d = __half2float(*(reinterpret_cast<half*>(data + block_id * 144 + 0)));
+        const float min = __half2float(*(reinterpret_cast<half*>(data + block_id * 144 + 2)));
+        const uint8_t* q = (uint8_t*)(data + block_id * 144 + 16);
+        int is = 0;
+        uint8_t sc, m;
+        for (int j = 0; j < blk_size; j += 64) {
+            uint8_t* scales = (uint8_t*)(data + block_id * 144 + 4);
+            get_scale_min_k4(is + 0, scales, &sc, &m);
+            const float d1 = d * sc; const float m1 = min * m;
+            get_scale_min_k4(is + 1, scales, &sc, &m);
+            const float d2 = d * sc; const float m2 = min * m;
+            for (int l = 0; l < 32; ++l) *output_blk++ = d1 * (q[l] & 0xF) - m1;
+            for (int l = 0; l < 32; ++l) *output_blk++ = d2 * (q[l] >> 4) - m2;
+            q += 32; is += 2;
+        }
+    }
+}
+
+__global__ void dequantize_q6_k_kernel(int8_t* data, float* output, int blk_size, int num_blocks) {
+    int global_idx = blockIdx.x * blockDim.x + threadIdx.x;
+    for (auto block_id=global_idx; block_id<num_blocks; block_id+=blockDim.x * gridDim.x){
+        float* output_blk = (float*)(output + block_id * 256);
+        const float d = __half2float(*(reinterpret_cast<half*>(data + block_id * blk_size + 208)));
+
+        const uint8_t * __restrict__ ql = (uint8_t*)(data + block_id * blk_size);
+        const uint8_t * __restrict__ qh = (uint8_t*)(data + block_id * blk_size + 128);
+        const int8_t * __restrict__ sc = (int8_t*)(data + block_id * blk_size + 192);
+
+
+        //if (blk_size == 256){
+        for (int n = 0; n < blk_size; n += 128) {
+            for (int l = 0; l < 32; ++l) {
+                int is = l/16;
+                const int8_t q1 = (int8_t)((ql[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
+                const int8_t q2 = (int8_t)((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
+                const int8_t q3 = (int8_t)((ql[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
+                const int8_t q4 = (int8_t)((ql[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
+                output_blk[l + 0] = d * sc[is + 0] * q1;
+                output_blk[l + 32] = d * sc[is + 2] * q2;
+                output_blk[l + 64] = d * sc[is + 4] * q3;
+                output_blk[l + 96] = d * sc[is + 6] * q4;
+            }
+            output_blk += 128;
+            ql += 64;
+            qh += 32;
+            sc += 8;
+        }
+    }
+}
+
+
+torch::Tensor dequantize_q8_0(torch::Tensor data, int blk_size, torch::Device device) {
+    int num_blocks = data.numel() / blk_size;
+    // create gpu
+    auto options_scales = torch::TensorOptions().dtype(torch::kFloat32).device(device).memory_format(torch::MemoryFormat::Contiguous);
+    auto options_qs = torch::TensorOptions().dtype(torch::kInt8).device(device).memory_format(torch::MemoryFormat::Contiguous);
+    auto scales_gpu = torch::empty({num_blocks, 1}, options_scales);
+    auto qs_gpu = torch::empty({num_blocks, 32}, options_qs);
+
+    // read on cpu
+    options_scales = torch::TensorOptions().dtype(torch::kFloat16).device(torch::kCPU);
+    options_qs = torch::TensorOptions().dtype(torch::kInt8).device(torch::kCPU);
+
+    // reinterpret
+    auto scales = torch::from_blob(data.data_ptr(), {num_blocks, 1 + 16}, options_scales).slice(1, 0, 1);
+    auto qs = torch::from_blob(data.data_ptr(), {num_blocks, 2 + 32}, options_qs).slice(1, 2);
+
+    auto scales_f32 = scales.to(torch::kFloat32);
+    scales_gpu.copy_(scales_f32, false);
+    qs_gpu.copy_(qs, false);
+
+    // Create output tensor
+    auto output = torch::zeros_like(qs, torch::dtype(torch::kFloat32).device(device));
+
+    // Launch kernel
+    dequantize_q8_0_kernel<<< 512, 256 >>>(
+        output.data_ptr<float>(), scales_gpu.data_ptr<float>(), qs_gpu.data_ptr<int8_t>(), num_blocks, 32);
+
+    cudaDeviceSynchronize();
+    return output;
+}
+
+
+torch::Tensor dequantize_q6_k(torch::Tensor data, int blk_size, torch::Device device) {
+    // data.numel % blk_size should be 0, else raise err
+    int num_blocks = data.numel() / blk_size;
+
+    auto options = torch::TensorOptions().dtype(torch::kInt8).device(device).memory_format(torch::MemoryFormat::Contiguous);
+    auto data_gpu = torch::empty({data.numel()}, options);
+
+    data_gpu.copy_(data, false);
+
+    // Create output tensor
+    auto output = torch::zeros({num_blocks, 256}, torch::dtype(torch::kFloat32).device(device));
+
+    // Launch kernel
+    dequantize_q6_k_kernel<<< 512, 256 >>>(data_gpu.data_ptr<int8_t>(), output.data_ptr<float>(), blk_size, num_blocks);
+    // dequantize_q6_k_kernel<<< 512, 256 >>>(data_gpu.data_ptr<int8_t>(), output.data_ptr<float>(), 256, num_blocks);
+
+    cudaDeviceSynchronize();
+    return output;
+}
+
+torch::Tensor dequantize_q4_k(torch::Tensor data, int blk_size, torch::Device device) {
+    // data.numel % blk_size should be 0, else raise err
+    int num_blocks = data.numel() / blk_size;
+
+    auto options = torch::TensorOptions().dtype(torch::kInt8).device(device).memory_format(torch::MemoryFormat::Contiguous);
+    auto data_gpu = torch::empty({data.numel()}, options);
+
+    data_gpu.copy_(data, false);
+
+    // Create output tensor
+    auto output = torch::zeros({num_blocks, 256}, torch::dtype(torch::kFloat32).device(device));
+
+    // Launch kernel
+    dequantize_q4_k_kernel<<< 512, 256 >>>(data_gpu.data_ptr<int8_t>(), output.data_ptr<float>(), 256, num_blocks);
+
+    cudaDeviceSynchronize();
+    return output;
+}
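For reference, the Q8_0 path above operates on 34-byte blocks: a fp16 scale `d` followed by 32 signed int8 quants, with element i dequantizing to d * qs[i]. A plain host-side sketch of the same per-block arithmetic (the scale is passed in already decoded to float so the sketch needs no fp16 helpers):

    #include <cstdint>

    // Matches what dequantize_q8_0_kernel computes for one block.
    void dequant_q8_0_block_ref(float d, const int8_t* qs /*32 entries*/,
                                float* out /*32 entries*/) {
        for (int i = 0; i < 32; i++) {
            out[i] = d * qs[i];
        }
    }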
diff --git a/ktransformers/ktransformers_ext/cuda/custom_gguf/ops.h b/ktransformers/ktransformers_ext/cuda/custom_gguf/ops.h
new file mode 100644
index 0000000..9af8f30
--- /dev/null
+++ b/ktransformers/ktransformers_ext/cuda/custom_gguf/ops.h
@@ -0,0 +1,18 @@
+/**
+ * @Description :
+ * @Author : Azure-Tang
+ * @Date : 2024-07-22 09:27:55
+ * @Version : 1.0.0
+ * @LastEditors : Azure
+ * @LastEditTime : 2024-07-26 08:38:20
+ * @Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
+**/
+#pragma once
+
+#include <torch/extension.h>
+#include <torch/torch.h>
+#include <cstdint>
+
+torch::Tensor dequantize_q8_0(torch::Tensor data, int blk_size, torch::Device device);
+torch::Tensor dequantize_q6_k(torch::Tensor data, int blk_size, torch::Device device);
+torch::Tensor dequantize_q4_k(torch::Tensor data, int blk_size, torch::Device device);
\ No newline at end of file
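A hypothetical call site for these wrappers (names and data are illustrative): a Q6_K super-block is 210 bytes (128 B ql, 64 B qh, 16 B scales, 2 B fp16 d), so blk_size here is the byte size of one block and the result is a [num_blocks, 256] fp32 tensor on the chosen GPU.

    #include <torch/torch.h>
    #include "ops.h"  // dequantize_q6_k (path assumed)

    torch::Tensor dequant_q6k_example(int64_t num_blocks) {
        // Stand-in for real packed data; a real caller would load bytes from a GGUF file.
        torch::Tensor raw = torch::randint(-128, 127, {num_blocks * 210}, torch::kInt8);
        return dequantize_q6_k(raw, /*blk_size=*/210, torch::Device(torch::kCUDA, 0));
    }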
diff --git a/ktransformers/ktransformers_ext/cuda/gptq_marlin/gptq_marlin.cu b/ktransformers/ktransformers_ext/cuda/gptq_marlin/gptq_marlin.cu
new file mode 100644
index 0000000..e8e5153
--- /dev/null
+++ b/ktransformers/ktransformers_ext/cuda/gptq_marlin/gptq_marlin.cu
@@ -0,0 +1,1872 @@
+/*
+ * Modified by Neural Magic
+ * Copyright (C) Marlin.2024 Elias Frantar
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Adapted from https://github.com/IST-DASLab/marlin
+ */
+/*
+ * Adapted from https://github.com/vllm-project/vllm/tree/main/csrc/quantization/gptq_marlin
+ */
+#include "gptq_marlin.cuh"
+#include "gptq_marlin_dtypes.cuh"
+
+#define STATIC_ASSERT_SCALAR_TYPE_VALID(scalar_t)               \
+  static_assert(std::is_same<scalar_t, half>::value ||          \
+                    std::is_same<scalar_t, nv_bfloat16>::value, \
+                "only float16 and bfloat16 is supported");
+
+template <typename T>
+inline std::string str(T x) {
+  return std::to_string(x);
+}
+
+namespace gptq_marlin {
+
+#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
+
+__global__ void permute_cols_kernel(int4 const* __restrict__ a_int4_ptr,
+                                    int const* __restrict__ perm_int_ptr,
+                                    int4* __restrict__ out_int4_ptr, int size_m,
+                                    int size_k, int block_rows) {}
+
+template <typename scalar_t,          // compute dtype, half or nv_bfloat16
+          const int num_bits,         // number of bits used for weights
+          const int threads,          // number of threads in a threadblock
+          const int thread_m_blocks,  // number of 16x16 blocks in the m
+                                      // dimension (batchsize) of the
+                                      // threadblock
+          const int thread_n_blocks,  // same for n dimension (output)
+          const int thread_k_blocks,  // same for k dimension (reduction)
+          const int stages,  // number of stages for the async global->shared
+                             // fetch pipeline
+          const bool has_act_order,    // whether act_order is enabled
+          const int group_blocks = -1  // number of consecutive 16x16 blocks
+                                       // with a separate quantization scale
+          >
+__global__ void Marlin(
+    const int4* __restrict__ A,  // fp16 input matrix of shape mxk
+    const int4* __restrict__ B,  // 4bit quantized weight matrix of shape kxn
+    int4* __restrict__ C,        // fp16 output buffer of shape mxn
+    const int4* __restrict__ scales_ptr,  // fp16 quantization scales of shape
+                                          // (k/groupsize)xn
+    const int* __restrict__ g_idx,        // int32 group indices of shape k
+    int num_groups,  // number of scale groups per output channel
+    int prob_m,      // batch dimension m
+    int prob_n,      // output dimension n
+    int prob_k,      // reduction dimension k
+    int* locks       // extra global storage for barrier synchronization
+) {}
+
+}  // namespace gptq_marlin
+
+torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight,
+                               torch::Tensor& b_scales, torch::Tensor& g_idx,
+                               torch::Tensor& perm, torch::Tensor& workspace,
+                               int64_t num_bits, int64_t size_m, int64_t size_n,
+                               int64_t size_k, bool is_k_full) {
+  TORCH_CHECK_NOT_IMPLEMENTED(false,
+                              "marlin_gemm(..) requires CUDA_ARCH >= 8.0");
+  return torch::empty({1, 1});
+}
+
+#else
+// m16n8k16 tensor core mma instruction with fp16 inputs and fp32
+// output/accumulation.
+template <typename scalar_t>
+__device__ inline void mma(const typename ScalarType<scalar_t>::FragA& a_frag,
+                           const typename ScalarType<scalar_t>::FragB& frag_b,
+                           typename ScalarType<scalar_t>::FragC& frag_c) {
+  const uint32_t* a = reinterpret_cast<const uint32_t*>(&a_frag);
+  const uint32_t* b = reinterpret_cast<const uint32_t*>(&frag_b);
+  float* c = reinterpret_cast<float*>(&frag_c);
+  if constexpr (std::is_same<scalar_t, half>::value) {
+    asm volatile(
+        "mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 "
+        "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
+        : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3])
+        : "r"(a[0]), "r"(a[1]), "r"(a[2]), "r"(a[3]), "r"(b[0]), "r"(b[1]),
+          "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3]));
+  } else if constexpr (std::is_same<scalar_t, nv_bfloat16>::value) {
+    asm volatile(
+        "mma.sync.aligned.m16n8k16.row.col.f32.bf16.bf16.f32 "
+        "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
+        : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3])
+        : "r"(a[0]), "r"(a[1]), "r"(a[2]), "r"(a[3]), "r"(b[0]), "r"(b[1]),
+          "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3]));
+  } else {
+    STATIC_ASSERT_SCALAR_TYPE_VALID(scalar_t);
+  }
+}
+
+// Instruction for loading a full 16x16 matrix fragment of operand A from shared
+// memory, directly in tensor core layout.
+template <typename scalar_t>
+__device__ inline void ldsm4(typename ScalarType<scalar_t>::FragA& frag_a,
+                             const void* smem_ptr) {
+  uint32_t* a = reinterpret_cast<uint32_t*>(&frag_a);
+  uint32_t smem = static_cast<uint32_t>(__cvta_generic_to_shared(smem_ptr));
+  asm volatile("ldmatrix.sync.aligned.m8n8.x4.shared.b16 {%0,%1,%2,%3}, [%4];\n"
+               : "=r"(a[0]), "=r"(a[1]), "=r"(a[2]), "=r"(a[3])
+               : "r"(smem));
+}
+
+// Lookup-table based 3-input logical operation; explicitly used for
+// dequantization as the compiler does not seem to automatically recognize it in
+// all cases.
+template <int lut>
+__device__ inline int lop3(int a, int b, int c) {
+  int res;
+  asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
+               : "=r"(res)
+               : "r"(a), "r"(b), "r"(c), "n"(lut));
+  return res;
+}
+
+// Constructs destination register by taking bytes from 2 sources (based on
+// mask)
+template <int start_byte, int mask>
+__device__ inline uint32_t prmt(uint32_t a) {
+  uint32_t res;
+  asm volatile("prmt.b32 %0, %1, %2, %3;\n"
+               : "=r"(res)
+               : "r"(a), "n"(start_byte), "n"(mask));
+  return res;
+}
+
+// Efficiently dequantize an int32 value into a full B-fragment of 4 fp16
+// values. We mostly follow the strategy in the link below, with some small
+// changes:
+// - FP16:
+// https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/cutlass_extensions/include/cutlass_extensions/interleaved_numeric_conversion.h#L215-L287
+// - BF16:
+// https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/cutlass_extensions/include/cutlass_extensions/interleaved_numeric_conversion.h#L327-L385
+template <typename scalar_t>
+__device__ inline typename ScalarType<scalar_t>::FragB dequant_4bit(int q) {
+  STATIC_ASSERT_SCALAR_TYPE_VALID(scalar_t);
+}
+
+template <>
+__device__ inline typename ScalarType<half>::FragB dequant_4bit<half>(int q) {
+  const int LO = 0x000f000f;
+  const int HI = 0x00f000f0;
+  const int EX = 0x64006400;
+  // Guarantee that the `(a & b) | c` operations are LOP3s.
+  int lo = lop3<(0xf0 & 0xcc) | 0xaa>(q, LO, EX);
+  int hi = lop3<(0xf0 & 0xcc) | 0xaa>(q, HI, EX);
+  // We want signed int4 outputs, hence we fuse the `-8` symmetric zero point
+  // directly into `SUB` and `ADD`.
+  const int SUB = 0x64086408;
+  const int MUL = 0x2c002c00;
+  const int ADD = 0xd480d480;
+  typename ScalarType<half>::FragB frag_b;
+  frag_b[0] = __hsub2(*reinterpret_cast<half2*>(&lo),
+                      *reinterpret_cast<const half2*>(&SUB));
+  frag_b[1] = __hfma2(*reinterpret_cast<half2*>(&hi),
+                      *reinterpret_cast<const half2*>(&MUL),
+                      *reinterpret_cast<const half2*>(&ADD));
+  return frag_b;
+}
+
+template <>
+__device__ inline typename ScalarType<nv_bfloat16>::FragB
+dequant_4bit<nv_bfloat16>(int q) {
+  static constexpr uint32_t MASK = 0x000f000f;
+  static constexpr uint32_t EX = 0x43004300;
+
+  // Guarantee that the `(a & b) | c` operations are LOP3s.
+
+  int lo = lop3<(0xf0 & 0xcc) | 0xaa>(q, MASK, EX);
+  q >>= 4;
+  int hi = lop3<(0xf0 & 0xcc) | 0xaa>(q, MASK, EX);
+
+  typename ScalarType<nv_bfloat16>::FragB frag_b;
+  static constexpr uint32_t MUL = 0x3F803F80;
+  static constexpr uint32_t ADD = 0xC308C308;
+
+  frag_b[0] = __hfma2(*reinterpret_cast<nv_bfloat162*>(&lo),
+                      *reinterpret_cast<const nv_bfloat162*>(&MUL),
+                      *reinterpret_cast<const nv_bfloat162*>(&ADD));
+  frag_b[1] = __hfma2(*reinterpret_cast<nv_bfloat162*>(&hi),
+                      *reinterpret_cast<const nv_bfloat162*>(&MUL),
+                      *reinterpret_cast<const nv_bfloat162*>(&ADD));
+  return frag_b;
+}
+
+// Fast Int8ToFp16/Int8ToBf16: Efficiently dequantize 8bit int values to fp16 or
+// bf16 Reference:
+// - FP16:
+// https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/cutlass_extensions/include/cutlass_extensions/interleaved_numeric_conversion.h#L53-L85
+// - BF16:
+// https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/cutlass_extensions/include/cutlass_extensions/interleaved_numeric_conversion.h#L125-L175
+template <typename scalar_t>
+__device__ inline typename ScalarType<scalar_t>::FragB dequant_8bit(int q) {
+  STATIC_ASSERT_SCALAR_TYPE_VALID(scalar_t);
+}
+
+template <>
+__device__ inline typename ScalarType<half>::FragB dequant_8bit<half>(int q) {
+  static constexpr uint32_t mask_for_elt_01 = 0x5250;
+  static constexpr uint32_t mask_for_elt_23 = 0x5351;
+  static constexpr uint32_t start_byte_for_fp16 = 0x64646464;
+
+  uint32_t lo = prmt<start_byte_for_fp16, mask_for_elt_01>(q);
+  uint32_t hi = prmt<start_byte_for_fp16, mask_for_elt_23>(q);
+
+  static constexpr uint32_t I8s_TO_F16s_MAGIC_NUM = 0x64806480;
+
+  typename ScalarType<half>::FragB frag_b;
+  frag_b[0] = __hsub2(*reinterpret_cast<half2*>(&lo),
+                      *reinterpret_cast<const half2*>(&I8s_TO_F16s_MAGIC_NUM));
+  frag_b[1] = __hsub2(*reinterpret_cast<half2*>(&hi),
+                      *reinterpret_cast<const half2*>(&I8s_TO_F16s_MAGIC_NUM));
+  return frag_b;
+}
+
+template <>
+__device__ inline typename ScalarType<nv_bfloat16>::FragB
+dequant_8bit<nv_bfloat16>(int q) {
+  typename ScalarType<nv_bfloat16>::FragB frag_b;
+
+  float fp32_intermediates[4];
+  uint32_t* fp32_intermediates_casted =
+      reinterpret_cast<uint32_t*>(fp32_intermediates);
+
+  static constexpr uint32_t fp32_base = 0x4B000000;
+  fp32_intermediates_casted[0] = __byte_perm(q, fp32_base, 0x7650);
+  fp32_intermediates_casted[1] = __byte_perm(q, fp32_base, 0x7652);
+  fp32_intermediates_casted[2] = __byte_perm(q, fp32_base, 0x7651);
+  fp32_intermediates_casted[3] = __byte_perm(q, fp32_base, 0x7653);
+
+  fp32_intermediates[0] -= 8388736.f;
+  fp32_intermediates[1] -= 8388736.f;
+  fp32_intermediates[2] -= 8388736.f;
+  fp32_intermediates[3] -= 8388736.f;
+
+  uint32_t* bf16_result_ptr = reinterpret_cast<uint32_t*>(&frag_b);
+  bf16_result_ptr[0] = __byte_perm(fp32_intermediates_casted[0],
+                                   fp32_intermediates_casted[1], 0x7632);
+  bf16_result_ptr[1] = __byte_perm(fp32_intermediates_casted[2],
+                                   fp32_intermediates_casted[3], 0x7632);
+
+  return frag_b;
+}
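To see why the fp16 magic numbers above work: OR-ing a 4-bit value q into the half pattern 0x6400 (= 1024.0) places q in the low mantissa bits, producing exactly 1024.0 + q, and 0x6408 is 1032.0, so the fused subtraction yields q - 8 (the symmetric zero point). A standalone self-check, assuming only a tiny hand-rolled fp16 decoder for normal values:

    #include <cassert>
    #include <cstdint>

    // Minimal fp16 decoder; valid for normal numbers (exponent >= 15) only.
    static float half_bits_to_float(uint16_t h) {
        int exp = (h >> 10) & 0x1f;
        float mant = 1.0f + (h & 0x3ff) / 1024.0f;
        return ((h >> 15) ? -1.0f : 1.0f) * mant * (float)(1 << (exp - 15));
    }

    int main() {
        for (uint16_t q = 0; q < 16; q++) {
            float packed = half_bits_to_float(0x6400 | q);       // 1024 + q
            float bias = half_bits_to_float(0x6408);             // 1032
            assert(packed - bias == (float)((int)q - 8));        // q - 8
        }
        return 0;
    }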
+// Multiply dequantized values by the corresponding quantization scale; used
+// only for grouped quantization.
+template <typename scalar_t>
+__device__ inline void scale(typename ScalarType<scalar_t>::FragB& frag_b,
+                             typename ScalarType<scalar_t>::FragS& frag_s,
+                             int i) {
+  using scalar_t2 = typename ScalarType<scalar_t>::scalar_t2;
+  scalar_t2 s =
+      ScalarType<scalar_t>::num2num2(reinterpret_cast<scalar_t*>(&frag_s)[i]);
+  frag_b[0] = __hmul2(frag_b[0], s);
+  frag_b[1] = __hmul2(frag_b[1], s);
+}
+
+// Same as above, but for act_order (each K is multiplied individually)
+template <typename scalar_t>
+__device__ inline void scale4(typename ScalarType<scalar_t>::FragB& frag_b,
+                              typename ScalarType<scalar_t>::FragS& frag_s_1,
+                              typename ScalarType<scalar_t>::FragS& frag_s_2,
+                              typename ScalarType<scalar_t>::FragS& frag_s_3,
+                              typename ScalarType<scalar_t>::FragS& frag_s_4,
+                              int i) {
+  using scalar_t2 = typename ScalarType<scalar_t>::scalar_t2;
+  scalar_t2 s_val_1_2;
+  s_val_1_2.x = reinterpret_cast<scalar_t*>(&frag_s_1)[i];
+  s_val_1_2.y = reinterpret_cast<scalar_t*>(&frag_s_2)[i];
+
+  scalar_t2 s_val_3_4;
+  s_val_3_4.x = reinterpret_cast<scalar_t*>(&frag_s_3)[i];
+  s_val_3_4.y = reinterpret_cast<scalar_t*>(&frag_s_4)[i];
+
+  frag_b[0] = __hmul2(frag_b[0], s_val_1_2);
+  frag_b[1] = __hmul2(frag_b[1], s_val_3_4);
+}
+
+// Given 2 floats multiply by 2 scales (halves)
+template <typename scalar_t>
+__device__ inline void scale_float(float* c,
+                                   typename ScalarType<scalar_t>::FragS& s) {
+  scalar_t* s_ptr = reinterpret_cast<scalar_t*>(&s);
+  c[0] = __fmul_rn(c[0], ScalarType<scalar_t>::num2float(s_ptr[0]));
+  c[1] = __fmul_rn(c[1], ScalarType<scalar_t>::num2float(s_ptr[1]));
+}
+
+// Wait until barrier reaches `count`, then lock for current threadblock.
+__device__ inline void barrier_acquire(int* lock, int count) {
+  if (threadIdx.x == 0) {
+    int state = -1;
+    do
+      // Guarantee that subsequent writes by this threadblock will be visible
+      // globally.
+      asm volatile("ld.global.acquire.gpu.b32 %0, [%1];\n"
+                   : "=r"(state)
+                   : "l"(lock));
+    while (state != count);
+  }
+  __syncthreads();
+}
+
+// Release barrier and increment visitation count.
+__device__ inline void barrier_release(int* lock, bool reset = false) {
+  __syncthreads();
+  if (threadIdx.x == 0) {
+    if (reset) {
+      lock[0] = 0;
+      return;
+    }
+    int val = 1;
+    // Make sure that all writes since acquiring this barrier are visible
+    // globally, while releasing the barrier.
+    asm volatile("fence.acq_rel.gpu;\n");
+    asm volatile("red.relaxed.gpu.global.add.s32 [%0], %1;\n"
+                 :
+                 : "l"(lock), "r"(val));
+  }
+}
+
+// For a given "a" of size [M,K] performs a permutation of the K columns based
+// on the given "perm" indices.
+__global__ void permute_cols_kernel(int4 const* __restrict__ a_int4_ptr,
+                                    int const* __restrict__ perm_int_ptr,
+                                    int4* __restrict__ out_int4_ptr, int size_m,
+                                    int size_k, int block_rows) {
+  int start_row = block_rows * blockIdx.x;
+  int finish_row = start_row + block_rows;
+  if (finish_row > size_m) {
+    finish_row = size_m;
+  }
+  int cur_block_rows = finish_row - start_row;
+
+  int row_stride = size_k * sizeof(half) / 16;
+
+  auto permute_row = [&](int row) {
+    int iters = size_k / default_threads;
+    int rest = size_k % default_threads;
+
+    int offset = row * row_stride;
+
+    half const* a_row_half = reinterpret_cast<half const*>(a_int4_ptr + offset);
+    half* out_half = reinterpret_cast<half*>(out_int4_ptr + offset);
+
+    int base_k = 0;
+
+    for (int i = 0; i < iters; i++) {
+      int cur_k = base_k + threadIdx.x;
+      int src_pos = perm_int_ptr[cur_k];
+
+      out_half[cur_k] = a_row_half[src_pos];
+
+      base_k += default_threads;
+    }
+
+    if (rest) {
+      if (threadIdx.x < rest) {
+        int cur_k = base_k + threadIdx.x;
+        int src_pos = perm_int_ptr[cur_k];
+
+        out_half[cur_k] = a_row_half[src_pos];
+      }
+    }
+  };
+
+  for (int i = 0; i < cur_block_rows; i++) {
+    int cur_row = start_row + i;
+    if (cur_row < size_m) {
+      permute_row(cur_row);
+    }
+  }
+}
+
+template <typename scalar_t,          // compute dtype, half or nv_bfloat16
+          const int num_bits,         // number of bits used for weights
+          const int threads,          // number of threads in a threadblock
+          const int thread_m_blocks,  // number of 16x16 blocks in the m
+                                      // dimension (batchsize) of the
+                                      // threadblock
+          const int thread_n_blocks,  // same for n dimension (output)
+          const int thread_k_blocks,  // same for k dimension (reduction)
+          const int stages,  // number of stages for the async global->shared
+                             // fetch pipeline
+          const bool has_act_order,    // whether act_order is enabled
+          const int group_blocks = -1  // number of consecutive 16x16 blocks
+                                       // with a separate quantization scale
+          >
+__global__ void Marlin(
+    const int4* __restrict__ A,  // fp16 input matrix of shape mxk
+    const int4* __restrict__ B,  // 4bit quantized weight matrix of shape kxn
+    int4* __restrict__ C,        // fp16 output buffer of shape mxn
+    const int4* __restrict__ scales_ptr,  // fp16 quantization scales of shape
+                                          // (k/groupsize)xn
+    const int* __restrict__ g_idx,        // int32 group indices of shape k
+    int num_groups,  // number of scale groups per output channel
+    int prob_m,      // batch dimension m
+    int prob_n,      // output dimension n
+    int prob_k,      // reduction dimension k
+    int* locks       // extra global storage for barrier synchronization
+) {
+  // Each threadblock processes one "stripe" of the B matrix with (roughly) the
+  // same size, which might involve multiple column "slices" (of width 16 *
+  // `thread_n_blocks`). Stripes are defined as shown in the 3x3 matrix 5 SM
+  // example:
+  //   0 1 3
+  //   0 2 3
+  //   1 2 4
+  // While this kind of partitioning makes things somewhat more complicated, it
+  // ensures good utilization of all SMs for many kinds of shape and GPU
+  // configurations, while requiring as few slow global cross-threadblock
+  // reductions as possible.
+  using Dtype = ScalarType<scalar_t>;
+  using scalar_t2 = typename ScalarType<scalar_t>::scalar_t2;
+  using FragA = typename ScalarType<scalar_t>::FragA;
+  using FragB = typename ScalarType<scalar_t>::FragB;
+  using FragC = typename ScalarType<scalar_t>::FragC;
+  using FragS = typename ScalarType<scalar_t>::FragS;
+
+  constexpr int pack_factor = 32 / num_bits;
+
+  // For larger GEMMs we run multiple batchsize 64 versions in parallel for a
+  // better partitioning with less reductions
+  int parallel = 1;
+  if (prob_m > 16 * thread_m_blocks) {
+    parallel = prob_m / (16 * thread_m_blocks);
+    prob_m = 16 * thread_m_blocks;
+  }
+
+  int k_tiles = prob_k / 16 / thread_k_blocks;
+  int n_tiles = prob_n / 16 / thread_n_blocks;
+  int iters = div_ceil(k_tiles * n_tiles * parallel, gridDim.x);
+
+  if constexpr (!has_act_order && group_blocks != -1) {
+    if (group_blocks >= thread_k_blocks) {
+      // Ensure that the number of tiles in each stripe is a multiple of the
+      // groupsize; this avoids an annoying special case where a stripe starts
+      // in the middle of group.
+      iters = (group_blocks / thread_k_blocks) *
+              div_ceil(iters, (group_blocks / thread_k_blocks));
+    }
+  }
+
+  int slice_row = (iters * blockIdx.x) % k_tiles;
+  int slice_col_par = (iters * blockIdx.x) / k_tiles;
+  int slice_col = slice_col_par;
+  int slice_iters;  // number of threadblock tiles in the current slice
+  int slice_count =
+      0;          // total number of active threadblocks in the current slice
+  int slice_idx;  // index of threadblock in current slice; numbered bottom to
+                  // top
+
+  // We can easily implement parallel problem execution by just remapping
+  // indices and advancing global pointers
+  if (slice_col_par >= n_tiles) {
+    A += (slice_col_par / n_tiles) * 16 * thread_m_blocks * prob_k / 8;
+    C += (slice_col_par / n_tiles) * 16 * thread_m_blocks * prob_n / 8;
+    locks += (slice_col_par / n_tiles) * n_tiles;
+    slice_col = slice_col_par % n_tiles;
+  }
+
+  // Compute all information about the current slice which is required for
+  // synchronization.
+  auto init_slice = [&]() {
+    slice_iters =
+        iters * (blockIdx.x + 1) - (k_tiles * slice_col_par + slice_row);
+    if (slice_iters < 0 || slice_col_par >= n_tiles * parallel) slice_iters = 0;
+    if (slice_iters == 0) return;
+    if (slice_row + slice_iters > k_tiles) slice_iters = k_tiles - slice_row;
+    slice_count = 1;
+    slice_idx = 0;
+    int col_first = iters * div_ceil(k_tiles * slice_col_par, iters);
+    if (col_first <= k_tiles * (slice_col_par + 1)) {
+      int col_off = col_first - k_tiles * slice_col_par;
+      slice_count = div_ceil(k_tiles - col_off, iters);
+      if (col_off > 0) slice_count++;
+      int delta_first = iters * blockIdx.x - col_first;
+      if (delta_first < 0 || (col_off == 0 && delta_first == 0))
+        slice_idx = slice_count - 1;
+      else {
+        slice_idx = slice_count - 1 - delta_first / iters;
+        if (col_off > 0) slice_idx--;
+      }
+    }
+    if (slice_col == n_tiles) {
+      A += 16 * thread_m_blocks * prob_k / 8;
+      C += 16 * thread_m_blocks * prob_n / 8;
+      locks += n_tiles;
+      slice_col = 0;
+    }
+  };
+  init_slice();
+
+  // A sizes/strides
+
+  // stride of the A matrix in global memory
+  int a_gl_stride = prob_k / 8;
+  // stride of an A matrix tile in shared memory
+  constexpr int a_sh_stride = 16 * thread_k_blocks / 8;
+  // delta between subsequent A tiles in global memory
+  constexpr int a_gl_rd_delta_o = 16 * thread_k_blocks / 8;
+  // between subsequent accesses within a tile
+  int a_gl_rd_delta_i = a_gl_stride * (threads / a_gl_rd_delta_o);
+  // between shared memory writes
+  constexpr int a_sh_wr_delta = a_sh_stride * (threads / a_gl_rd_delta_o);
+  // between shared memory tile reads
+  constexpr int a_sh_rd_delta_o = 2 * ((threads / 32) / (thread_n_blocks / 4));
+  // within a shared memory tile
+  constexpr int a_sh_rd_delta_i = a_sh_stride * 16;
+  // overall size of a tile
+  constexpr int a_sh_stage = a_sh_stride * (16 * thread_m_blocks);
+  // number of shared write iterations for a tile
+  constexpr int a_sh_wr_iters = div_ceil(a_sh_stage, a_sh_wr_delta);
+
+  // B sizes/strides
+  int b_gl_stride = 16 * prob_n / (pack_factor * 4);
+  constexpr int b_sh_stride = ((thread_n_blocks * 16) * 16 / pack_factor) / 4;
+  constexpr int b_thread_vecs = num_bits == 4 ? 1 : 2;
+  constexpr int b_sh_stride_threads = b_sh_stride / b_thread_vecs;
+
+  int b_gl_rd_delta_o = b_gl_stride * thread_k_blocks;
+  int b_gl_rd_delta_i = b_gl_stride * (threads / b_sh_stride_threads);
+  constexpr int b_sh_wr_delta = threads * b_thread_vecs;
+  constexpr int b_sh_rd_delta = threads * b_thread_vecs;
+  constexpr int b_sh_stage = b_sh_stride * thread_k_blocks;
+  constexpr int b_sh_wr_iters = b_sh_stage / b_sh_wr_delta;
+
+  // Scale sizes/strides without act_order
+  int s_gl_stride = prob_n / 8;
+  constexpr int s_sh_stride = 16 * thread_n_blocks / 8;
+  constexpr int s_tb_groups =
+      !has_act_order && group_blocks != -1 && group_blocks < thread_k_blocks
+          ? thread_k_blocks / group_blocks
+          : 1;
+  constexpr int s_sh_stage = s_tb_groups * s_sh_stride;
+  int s_gl_rd_delta = s_gl_stride;
+
+  // Scale size/strides with act_order
+  constexpr int tb_k = 16 * thread_k_blocks;
+  constexpr int g_idx_stage = has_act_order ? (tb_k * sizeof(int)) / 16 : 0;
+  // constexpr int act_s_row_stride = 1;
+  // int act_s_col_stride = act_s_row_stride * num_groups;
+  int act_s_col_stride = 1;
+  int act_s_col_warp_stride = act_s_col_stride * 8;
+  int tb_n_warps = thread_n_blocks / 4;
+  int act_s_col_tb_stride = act_s_col_warp_stride * tb_n_warps;
+
+  // Global A read index of current thread.
+  int a_gl_rd = a_gl_stride * (threadIdx.x / a_gl_rd_delta_o) +
+                (threadIdx.x % a_gl_rd_delta_o);
+  a_gl_rd += a_gl_rd_delta_o * slice_row;
+  // Shared write index of current thread.
+  int a_sh_wr = a_sh_stride * (threadIdx.x / a_gl_rd_delta_o) +
+                (threadIdx.x % a_gl_rd_delta_o);
+  // Shared read index.
+  int a_sh_rd =
+      a_sh_stride * ((threadIdx.x % 32) % 16) + (threadIdx.x % 32) / 16;
+  a_sh_rd += 2 * ((threadIdx.x / 32) / (thread_n_blocks / 4));
+
+  int b_gl_rd = b_gl_stride * (threadIdx.x / b_sh_stride_threads) +
+                (threadIdx.x % b_sh_stride_threads) * b_thread_vecs;
+  b_gl_rd += b_sh_stride * slice_col;
+  b_gl_rd += b_gl_rd_delta_o * slice_row;
+  int b_sh_wr = threadIdx.x * b_thread_vecs;
+  int b_sh_rd = threadIdx.x * b_thread_vecs;
+
+  // For act_order
+  constexpr int k_iter_size = tb_k / b_sh_wr_iters;
+  int slice_k_start = tb_k * slice_row;
+  int slice_k_finish = slice_k_start + tb_k * slice_iters;
+  int slice_k_start_shared_fetch = slice_k_start;
+  int slice_n_offset = act_s_col_tb_stride * slice_col;
+
+  // No act_order
+  int s_gl_rd;
+  if constexpr (!has_act_order) {
+    if constexpr (group_blocks == -1) {
+      s_gl_rd = s_sh_stride * slice_col + threadIdx.x;
+    } else {
+      s_gl_rd = s_gl_stride * ((thread_k_blocks * slice_row) / group_blocks) +
+                s_sh_stride * slice_col + threadIdx.x;
+    }
+  }
+  int s_sh_wr = threadIdx.x;
+  bool s_sh_wr_pred = threadIdx.x < s_sh_stride;
+
+  // We use a different scale layout for grouped and column-wise quantization as
+  // we scale a `half2` tile in column-major layout in the former and in
+  // row-major in the latter case.
+  int s_sh_rd;
+  if constexpr (group_blocks != -1)
+    s_sh_rd = 8 * ((threadIdx.x / 32) % (thread_n_blocks / 4)) +
+              (threadIdx.x % 32) / 4;
+  else
+    s_sh_rd = 8 * ((threadIdx.x / 32) % (thread_n_blocks / 4)) +
+              (threadIdx.x % 32) % 4;
+
+  // Precompute which thread should not read memory in which iterations; this is
+  // needed if there are more threads than required for a certain tilesize or
+  // when the batchsize is not a multiple of 16.
+  bool a_sh_wr_pred[a_sh_wr_iters];
+  #pragma unroll
+  for (int i = 0; i < a_sh_wr_iters; i++)
+    a_sh_wr_pred[i] = a_sh_wr_delta * i + a_sh_wr < a_sh_stride * prob_m;
+
+  // To ensure that writing and reading A tiles to/from shared memory, the
+  // latter in fragment format, is fully bank conflict free, we need to use a
+  // rather fancy XOR-based layout. The key here is that neither reads nor
+  // writes of the 16-byte `int4` blocks of 8 consecutive threads involve the
+  // same shared memory banks. Further, it seems (based on NSight-Compute) that
+  // each warp must also write a consecutive memory segment?
+  auto transform_a = [&](int i) {
+    int row = i / a_gl_rd_delta_o;
+    return a_gl_rd_delta_o * row + (i % a_gl_rd_delta_o) ^ row;
+  };
+  // Since the computation of this remapping is non-trivial and, due to our main
+  // loop unrolls, all shared memory accesses are static, we simply precompute
+  // both transformed reads and writes.
+  int a_sh_wr_trans[a_sh_wr_iters];
+  #pragma unroll
+  for (int i = 0; i < a_sh_wr_iters; i++)
+    a_sh_wr_trans[i] = transform_a(a_sh_wr_delta * i + a_sh_wr);
+  int a_sh_rd_trans[b_sh_wr_iters][thread_m_blocks];
+  #pragma unroll
+  for (int i = 0; i < b_sh_wr_iters; i++) {
+  #pragma unroll
+    for (int j = 0; j < thread_m_blocks; j++)
+      a_sh_rd_trans[i][j] =
+          transform_a(a_sh_rd_delta_o * i + a_sh_rd_delta_i * j + a_sh_rd);
+  }
+
+  // Since B-accesses have non-constant stride they have to be computed at
+  // runtime; we break dependencies between subsequent accesses within a tile by
+  // maintaining multiple pointers (we have enough registers), a tiny
+  // optimization.
+  const int4* B_ptr[b_sh_wr_iters];
+  #pragma unroll
+  for (int i = 0; i < b_sh_wr_iters; i++)
+    B_ptr[i] = B + b_gl_rd_delta_i * i + b_gl_rd;
+
+  extern __shared__ int4 sh[];
+  // Shared memory storage for global fetch pipelines.
+  int4* sh_a = sh;
+  int4* sh_b = sh_a + (stages * a_sh_stage);
+  int4* sh_g_idx = sh_b + (stages * b_sh_stage);
+  int4* sh_s = sh_g_idx + (stages * g_idx_stage);
+
+  // Register storage for double buffer of shared memory reads.
+  FragA frag_a[2][thread_m_blocks];
+  I4 frag_b_quant[2][b_thread_vecs];
+  FragC frag_c[thread_m_blocks][4][2];
+  FragS frag_s[2][4];         // No act-order
+  FragS act_frag_s[2][4][4];  // For act-order
+
+  // Zero accumulators.
+  auto zero_accums = [&]() {
+  #pragma unroll
+    for (int i = 0; i < thread_m_blocks * 4 * 2 * 4; i++)
+      reinterpret_cast<float*>(frag_c)[i] = 0;
+  };
+
+  int sh_first_group_id = -1;
+  int sh_num_groups = -1;
+  constexpr int sh_max_num_groups = 32;
+
+  auto fetch_scales_to_shared = [&](bool is_async, int first_group_id,
+                                    int last_group_id) {
+    sh_first_group_id = first_group_id;
+    sh_num_groups = last_group_id - first_group_id + 1;
+
+    if (sh_num_groups < sh_max_num_groups) {
+      sh_num_groups = sh_max_num_groups;
+    }
+
+    if (sh_first_group_id + sh_num_groups > num_groups) {
+      sh_num_groups = num_groups - sh_first_group_id;
+    }
+
+    int row_offset = first_group_id * s_gl_stride;
+
+    if (is_async) {
+      for (int i = 0; i < sh_num_groups; i++) {
+        if (threadIdx.x < s_sh_stride) {
+          cp_async4_pred(&sh_s[(i * s_sh_stride) + threadIdx.x],
+                         &scales_ptr[row_offset + (i * s_gl_stride) +
+                                     slice_n_offset + threadIdx.x]);
+        }
+      }
+    } else {
+      for (int i = 0; i < sh_num_groups; i++) {
+        if (threadIdx.x < s_sh_stride) {
+          sh_s[(i * s_sh_stride) + threadIdx.x] =
+              scales_ptr[row_offset + (i * s_gl_stride) + slice_n_offset +
+                         threadIdx.x];
+        }
+      }
+    }
+  };
+  // Asynchronously fetch the next A, B and s tile from global to the next
+  // shared memory pipeline location.
+  auto fetch_to_shared = [&](int pipe, int a_off, bool pred = true) {
+    if (pred) {
+      int4* sh_a_stage = sh_a + a_sh_stage * pipe;
+  #pragma unroll
+      for (int i = 0; i < a_sh_wr_iters; i++) {
+        cp_async4_pred(
+            &sh_a_stage[a_sh_wr_trans[i]],
+            &A[a_gl_rd_delta_i * i + a_gl_rd + a_gl_rd_delta_o * a_off],
+            a_sh_wr_pred[i]);
+      }
+      int4* sh_b_stage = sh_b + b_sh_stage * pipe;
+  #pragma unroll
+      for (int i = 0; i < b_sh_wr_iters; i++) {
+  #pragma unroll
+        for (int j = 0; j < b_thread_vecs; j++) {
+          cp_async4(&sh_b_stage[b_sh_wr_delta * i + b_sh_wr + j], B_ptr[i] + j);
+        }
+
+        B_ptr[i] += b_gl_rd_delta_o;
+      }
+
+      if constexpr (has_act_order) {
+        // Fetch g_idx thread-block portion
+        int full_pipe = a_off;
+        int cur_k = slice_k_start_shared_fetch + tb_k * full_pipe;
+        if (cur_k < prob_k && cur_k < slice_k_finish) {
+          int4* sh_g_idx_stage = sh_g_idx + g_idx_stage * pipe;
+
+          int4 const* cur_g_idx_stage_ptr =
+              reinterpret_cast<int4 const*>(&g_idx[cur_k]);
+
+          if (threadIdx.x < g_idx_stage) {
+            cp_async4_pred(&sh_g_idx_stage[threadIdx.x],
+                           &cur_g_idx_stage_ptr[threadIdx.x]);
+          }
+        }
+      } else {
+        if constexpr (group_blocks != -1) {
+          int4* sh_s_stage = sh_s + s_sh_stage * pipe;
+
+          if constexpr (group_blocks >= thread_k_blocks) {
+            // Only fetch scales if this tile starts a new group
+            if (pipe % (group_blocks / thread_k_blocks) == 0) {
+              if (s_sh_wr_pred) {
+                cp_async4(&sh_s_stage[s_sh_wr], &scales_ptr[s_gl_rd]);
+              }
+              s_gl_rd += s_gl_rd_delta;
+            }
+          } else {
+            for (int i = 0; i < s_tb_groups; i++) {
+              if (s_sh_wr_pred) {
+                cp_async4(&sh_s_stage[i * s_sh_stride + s_sh_wr],
+                          &scales_ptr[s_gl_rd]);
+              }
+              s_gl_rd += s_gl_rd_delta;
+            }
+          }
+        }
+      }
+    }
+    // Insert a fence even when we are winding down the pipeline to ensure that
+    // waiting is also correct at this point.
+    cp_async_fence();
+  };
+
+  // Wait until the next thread tile has been loaded to shared memory.
+  auto wait_for_stage = [&]() {
+    // We only have `stages - 2` active fetches since we are double buffering
+    // and can only issue the next fetch when it is guaranteed that the previous
+    // shared memory load is fully complete (as it may otherwise be
+    // overwritten).
+    cp_async_wait<stages - 2>();
+    __syncthreads();
+  };
+
+  // Load the next sub-tile from the current location in the shared memory pipe
+  // into the current register buffer.
+  auto fetch_to_registers = [&](int k, int pipe) {
+    int4* sh_a_stage = sh_a + a_sh_stage * pipe;
+  #pragma unroll
+    for (int i = 0; i < thread_m_blocks; i++)
+      ldsm4<scalar_t>(frag_a[k % 2][i],
+                      &sh_a_stage[a_sh_rd_trans[k % b_sh_wr_iters][i]]);
+    int4* sh_b_stage = sh_b + b_sh_stage * pipe;
+
+  #pragma unroll
+    for (int i = 0; i < b_thread_vecs; i++) {
+      frag_b_quant[k % 2][i] = *reinterpret_cast<I4*>(
+          &sh_b_stage[b_sh_rd_delta * (k % b_sh_wr_iters) + b_sh_rd + i]);
+    }
+  };
+
+  bool is_same_group[stages];
+  int same_group_id[stages];
+
+  auto init_same_group = [&](int pipe) {
+    if constexpr (!has_act_order) {
+      is_same_group[pipe] = false;
+      same_group_id[pipe] = 0;
+      return;
+    }
+
+    int4* sh_g_idx_stage = sh_g_idx + g_idx_stage * pipe;
+    int* sh_g_idx_int_ptr = reinterpret_cast<int*>(sh_g_idx_stage);
+
+    int group_id_1 = sh_g_idx_int_ptr[0];
+    int group_id_2 = sh_g_idx_int_ptr[tb_k - 1];
+
+    is_same_group[pipe] = group_id_1 == group_id_2;
+    same_group_id[pipe] = group_id_1;
+  };
+
+  auto fetch_scales_to_registers = [&](int k, int full_pipe) {
+    int pipe = full_pipe % stages;
+
+    if constexpr (!has_act_order) {
+      // No act-order case
+      if constexpr (group_blocks != -1) {
+        if constexpr (group_blocks >= thread_k_blocks) {
+          int4* sh_s_stage =
+              sh_s + s_sh_stage * ((group_blocks / thread_k_blocks) *
+                                   (pipe / (group_blocks / thread_k_blocks)));
+          reinterpret_cast<int4*>(&frag_s[k % 2])[0] = sh_s_stage[s_sh_rd];
+        } else {
+          int warp_id = threadIdx.x / 32;
+          int n_warps = thread_n_blocks / 4;
+
+          int warp_row = warp_id / n_warps;
+
+          int cur_k = warp_row * 16;
+          cur_k += k_iter_size * (k % b_sh_wr_iters);
+
+          int k_blocks = cur_k / 16;
+          int cur_group_id = k_blocks / group_blocks;
+
+          int4* sh_s_stage = sh_s + s_sh_stage * pipe;
+
+          reinterpret_cast<int4*>(&frag_s[k % 2])[0] =
+              sh_s_stage[s_sh_rd + cur_group_id * s_sh_stride];
+        }
+      }
+
+      return;
+    }
+
+    // Act-order case
+
+    // Determine K of the "current" thread-block
+    int cur_k = slice_k_start + tb_k * full_pipe;
+    if (cur_k >= prob_k || cur_k >= slice_k_finish) {
+      return;
+    }
+
+    // Reset (to current thread-block) since we read g_idx portion from the
+    // shared memory
+    cur_k = 0;
+
+    // Progress to current iteration
+    cur_k += k_iter_size * (k % b_sh_wr_iters);
+
+    // Determine "position" inside the thread-block (based on warp and
+    // thread-id)
+    int warp_id = threadIdx.x / 32;
+    int n_warps =
+        thread_n_blocks / 4;  // Each warp processes 4 16-size tiles over N
+
+    int warp_row = warp_id / n_warps;
+    int warp_col = warp_id % n_warps;
+
+    cur_k += warp_row * 16;
+
+    int th_id = threadIdx.x % 32;
+    cur_k += (th_id % 4) * 2;  // Due to tensor-core layout for fp16 B matrix
+
+    int s_col_shift =
+        /*slice_n_offset +*/ (act_s_col_warp_stride * warp_col) +
+        (th_id / 4) * act_s_col_stride;
+
+    if (is_same_group[pipe]) {
+      if (k % 2 == 0) {
+        *(reinterpret_cast<int4*>(&(act_frag_s[k % 2][0][0]))) =
+            sh_s[(same_group_id[pipe] - sh_first_group_id) * s_sh_stride +
+                 s_col_shift];
+      } else {
+        *(reinterpret_cast<int4*>(&(act_frag_s[k % 2][0][0]))) =
+            *(reinterpret_cast<int4*>(&(act_frag_s[(k - 1) % 2][0][0])));
+      }
+
+      for (int i = 1; i < 4; i++) {
+        *(reinterpret_cast<int4*>(&(act_frag_s[k % 2][i][0]))) =
+            *(reinterpret_cast<int4*>(&(act_frag_s[k % 2][0][0])));
+      }
+      return;
+    }
+
+    int4* sh_g_idx_stage = sh_g_idx + g_idx_stage * pipe;
+    int* sh_g_idx_int_ptr = reinterpret_cast<int*>(sh_g_idx_stage);
+
+    constexpr int k_frag_offsets[4] = {0, 1, 8,
+                                       9};  // Tensor core offsets per thread
+
+  #pragma unroll
+    for (int i = 0; i < 4; i++) {
+      int actual_k = cur_k + k_frag_offsets[i];
+
+      int group_id = sh_g_idx_int_ptr[actual_k];
+      int rel_group_id = group_id - sh_first_group_id;
+
+      *(reinterpret_cast<int4*>(&(act_frag_s[k % 2][i][0]))) =
+          sh_s[rel_group_id * s_sh_stride + s_col_shift];
+    }
+  };
+
+  // Execute the actual tensor core matmul of a sub-tile.
+  auto matmul = [&](int k) {
+  // We have the m dimension as the inner loop in order to encourage overlapping
+  // dequantization and matmul operations.
+  #pragma unroll
+    for (int j = 0; j < 4; j++) {
+      FragB frag_b0;
+      FragB frag_b1;
+      if constexpr (num_bits == 4) {
+        int b_quant = frag_b_quant[k % 2][0][j];
+        int b_quant_shift = b_quant >> 8;
+
+        frag_b0 = dequant_4bit<scalar_t>(b_quant);
+        frag_b1 = dequant_4bit<scalar_t>(b_quant_shift);
+
+      } else {
+        int* frag_b_quant_ptr = reinterpret_cast<int*>(frag_b_quant[k % 2]);
+        int b_quant_0 = frag_b_quant_ptr[j * 2 + 0];
+        int b_quant_1 = frag_b_quant_ptr[j * 2 + 1];
+
+        frag_b0 = dequant_8bit<scalar_t>(b_quant_0);
+        frag_b1 = dequant_8bit<scalar_t>(b_quant_1);
+      }
+
+      // Apply scale to frag_b0
+      if constexpr (has_act_order) {
+        scale4<scalar_t>(frag_b0, act_frag_s[k % 2][0][j],
+                         act_frag_s[k % 2][1][j], act_frag_s[k % 2][2][j],
+                         act_frag_s[k % 2][3][j], 0);
+      } else {
+        if constexpr (group_blocks != -1) {
+          scale<scalar_t>(frag_b0, frag_s[k % 2][j], 0);
+        }
+      }
+
+      // Apply scale to frag_b1
+      if constexpr (has_act_order) {
+        scale4<scalar_t>(frag_b1, act_frag_s[k % 2][0][j],
+                         act_frag_s[k % 2][1][j], act_frag_s[k % 2][2][j],
+                         act_frag_s[k % 2][3][j], 1);
+
+      } else {
+        if constexpr (group_blocks != -1) {
+          scale<scalar_t>(frag_b1, frag_s[k % 2][j], 1);
+        }
+      }
+
+  #pragma unroll
+      for (int i = 0; i < thread_m_blocks; i++) {
+        mma<scalar_t>(frag_a[k % 2][i], frag_b0, frag_c[i][j][0]);
+        mma<scalar_t>(frag_a[k % 2][i], frag_b1, frag_c[i][j][1]);
+      }
+    }
+  };
+
+  // Since we slice across the k dimension of a tile in order to increase the
+  // number of warps while keeping the n dimension of a tile reasonable, we have
+  // multiple warps that accumulate their partial sums of the same output
+  // location; which we have to reduce over in the end. We do this in shared
+  // memory.
+  auto thread_block_reduce = [&]() {
+    constexpr int red_off = threads / b_sh_stride_threads / 2;
+    if (red_off >= 1) {
+      int red_idx = threadIdx.x / b_sh_stride_threads;
+      constexpr int red_sh_stride = b_sh_stride_threads * 4 * 2;
+      constexpr int red_sh_delta = b_sh_stride_threads;
+      int red_sh_rd = red_sh_stride * (threadIdx.x / b_sh_stride_threads) +
+                      (threadIdx.x % b_sh_stride_threads);
+
+      // Parallel logarithmic shared memory reduction. We make sure to avoid any
+      // unnecessary read or write iterations, e.g., for two warps we write only
+      // once by warp 1 and read only once by warp 0.
+
+  #pragma unroll
+      for (int m_block = 0; m_block < thread_m_blocks; m_block++) {
+  #pragma unroll
+        for (int i = red_off; i > 0; i /= 2) {
+          if (i <= red_idx && red_idx < 2 * i) {
+  #pragma unroll
+            for (int j = 0; j < 4 * 2; j++) {
+              int red_sh_wr =
+                  red_sh_delta * j + (red_sh_rd - red_sh_stride * i);
+              if (i < red_off) {
+                float* c_rd =
+                    reinterpret_cast<float*>(&sh[red_sh_delta * j + red_sh_rd]);
+                float* c_wr = reinterpret_cast<float*>(&sh[red_sh_wr]);
+  #pragma unroll
+                for (int k = 0; k < 4; k++)
+                  reinterpret_cast<FragC*>(frag_c)[4 * 2 * m_block + j][k] +=
+                      c_rd[k] + c_wr[k];
+              }
+              sh[red_sh_wr] =
+                  reinterpret_cast<int4*>(&frag_c)[4 * 2 * m_block + j];
+            }
+          }
+          __syncthreads();
+        }
+        if (red_idx == 0) {
+  #pragma unroll
+          for (int i = 0; i < 4 * 2; i++) {
+            float* c_rd =
+                reinterpret_cast<float*>(&sh[red_sh_delta * i + red_sh_rd]);
+  #pragma unroll
+            for (int j = 0; j < 4; j++)
+              reinterpret_cast<FragC*>(frag_c)[4 * 2 * m_block + i][j] +=
+                  c_rd[j];
+          }
+        }
+        __syncthreads();
+      }
+    }
+  };
+
+  // Since multiple threadblocks may process parts of the same column slice, we
+  // finally have to globally reduce over the results. As the striped
+  // partitioning minimizes the number of such reductions and our outputs are
+  // usually rather small, we perform this reduction serially in L2 cache.
+  auto global_reduce = [&](bool first = false, bool last = false) {
+    // We are very careful here to reduce directly in the output buffer to
+    // maximize L2 cache utilization in this step. To do this, we write out
+    // results in FP16 (but still reduce with FP32 compute).
+    constexpr int active_threads = 32 * thread_n_blocks / 4;
+    if (threadIdx.x < active_threads) {
+      int c_gl_stride = prob_n / 8;
+      int c_gl_wr_delta_o = 8 * c_gl_stride;
+      int c_gl_wr_delta_i = 4 * (active_threads / 32);
+      int c_gl_wr = c_gl_stride * ((threadIdx.x % 32) / 4) +
+                    4 * (threadIdx.x / 32) + threadIdx.x % 4;
+      c_gl_wr += (2 * thread_n_blocks) * slice_col;
+      constexpr int c_sh_wr_delta = active_threads;
+      int c_sh_wr = threadIdx.x;
+
+      int row = (threadIdx.x % 32) / 4;
+
+      if (!first) {
+  // Interestingly, doing direct global accesses here really seems to mess up
+  // the compiler and lead to slowdowns, hence we also use async-copies even
+  // though these fetches are not actually asynchronous.
+  #pragma unroll
+        for (int i = 0; i < thread_m_blocks * 4; i++) {
+          cp_async4_pred(
+              &sh[c_sh_wr + c_sh_wr_delta * i],
+              &C[c_gl_wr + c_gl_wr_delta_o * (i / 2) +
+                 c_gl_wr_delta_i * (i % 2)],
+              i < (thread_m_blocks - 1) * 4 || 8 * (i / 2) + row < prob_m);
+        }
+        cp_async_fence();
+        cp_async_wait<0>();
+      }
+
+  #pragma unroll
+      for (int i = 0; i < thread_m_blocks * 4; i++) {
+        if (i < (thread_m_blocks - 1) * 4 || 8 * (i / 2) + row < prob_m) {
+          if (!first) {
+            int4 c_red = sh[c_sh_wr + i * c_sh_wr_delta];
+  #pragma unroll
+            for (int j = 0; j < 2 * 4; j++) {
+              reinterpret_cast<float*>(
+                  &frag_c)[4 * 2 * 4 * (i / 4) + 4 * j + (i % 4)] +=
+                  Dtype::num2float(reinterpret_cast<scalar_t*>(&c_red)[j]);
+            }
+          }
+          if (!last) {
+            int4 c;
+  #pragma unroll
+            for (int j = 0; j < 2 * 4; j++) {
+              reinterpret_cast<scalar_t*>(&c)[j] =
+                  Dtype::float2num(reinterpret_cast<float*>(
+                      &frag_c)[4 * 2 * 4 * (i / 4) + 4 * j + (i % 4)]);
+            }
+            C[c_gl_wr + c_gl_wr_delta_o * (i / 2) + c_gl_wr_delta_i * (i % 2)] =
+                c;
+          }
+        }
+      }
+    }
+  };
+
+  // Write out the reduced final result in the correct layout. We only actually
+  // reshuffle matrix fragments in this step, the reduction above is performed
+  // in fragment layout.
+  auto write_result = [&]() {
+    int c_gl_stride = prob_n / 8;
+    constexpr int c_sh_stride = 2 * thread_n_blocks + 1;
+    int c_gl_wr_delta = c_gl_stride * (threads / (2 * thread_n_blocks));
+    constexpr int c_sh_rd_delta =
+        c_sh_stride * (threads / (2 * thread_n_blocks));
+
+    int c_gl_wr = c_gl_stride * (threadIdx.x / (2 * thread_n_blocks)) +
+                  (threadIdx.x % (2 * thread_n_blocks));
+    c_gl_wr += (2 * thread_n_blocks) * slice_col;
+    int c_sh_wr =
+        (4 * c_sh_stride) * ((threadIdx.x % 32) / 4) + (threadIdx.x % 32) % 4;
+    c_sh_wr += 32 * (threadIdx.x / 32);
+    int c_sh_rd = c_sh_stride * (threadIdx.x / (2 * thread_n_blocks)) +
+                  (threadIdx.x % (2 * thread_n_blocks));
+
+    int c_gl_wr_end = c_gl_stride * prob_m;
+
+    // We first reorder in shared memory to guarantee the most efficient final
+    // global write patterns
+    auto write = [&](int idx, float c0, float c1, FragS& s) {
+      scalar_t2 res =
+          Dtype::nums2num2(Dtype::float2num(c0), Dtype::float2num(c1));
+
+      // For per-column quantization we finally apply the scale here (only for
+      // 4-bit)
+      if constexpr (!has_act_order && group_blocks == -1 && num_bits == 4) {
+        res = __hmul2(res, s[0]);
+      }
+
+      ((scalar_t2*)sh)[idx] = res;
+    };
+
+    if (threadIdx.x / 32 < thread_n_blocks / 4) {
+  #pragma unroll
+      for (int i = 0; i < thread_m_blocks; i++) {
+  #pragma unroll
+        for (int j = 0; j < 4; j++) {
+          int wr = c_sh_wr + 8 * j;
+          write(wr + (4 * c_sh_stride) * 0 + 0, frag_c[i][j][0][0],
+                frag_c[i][j][0][1], frag_s[j / 2][2 * (j % 2) + 0]);
+          write(wr + (4 * c_sh_stride) * 8 + 0, frag_c[i][j][0][2],
+                frag_c[i][j][0][3], frag_s[j / 2][2 * (j % 2) + 0]);
+          write(wr + (4 * c_sh_stride) * 0 + 4, frag_c[i][j][1][0],
+                frag_c[i][j][1][1], frag_s[j / 2][2 * (j % 2) + 1]);
+          write(wr + (4 * c_sh_stride) * 8 + 4, frag_c[i][j][1][2],
+                frag_c[i][j][1][3], frag_s[j / 2][2 * (j % 2) + 1]);
+        }
+        c_sh_wr += 16 * (4 * c_sh_stride);
+      }
+    }
+    __syncthreads();
+
+  #pragma unroll
+    for (int i = 0;
+         i < div_ceil(16 * thread_m_blocks, threads / (2 * thread_n_blocks));
+         i++) {
+      if (c_gl_wr < c_gl_wr_end) {
+        C[c_gl_wr] = sh[c_sh_rd];
+        c_gl_wr += c_gl_wr_delta;
+        c_sh_rd += c_sh_rd_delta;
+      }
+    }
+  };
+
+  // Start global fetch and register load pipelines.
+  auto start_pipes = [&]() {
+
+  #pragma unroll
+    for (int i = 0; i < stages - 1; i++) {
+      if (has_act_order && i == 0) {
+        int last_g_idx = slice_k_start + stages * tb_k * 2;
+        if (last_g_idx >= prob_k) {
+          last_g_idx = prob_k - 1;
+        }
+        fetch_scales_to_shared(true, g_idx[slice_k_start], g_idx[last_g_idx]);
+      }
+      fetch_to_shared(i, i, i < slice_iters);
+    }
+
+    zero_accums();
+    wait_for_stage();
+    init_same_group(0);
+    fetch_to_registers(0, 0);
+    fetch_scales_to_registers(0, 0);
+    a_gl_rd += a_gl_rd_delta_o * (stages - 1);
+    slice_k_start_shared_fetch += tb_k * (stages - 1);
+  };
+  if (slice_iters) {
+    start_pipes();
+  }
+
+  // Main loop.
+  while (slice_iters) {
+    // We unroll over both the global fetch and the register load pipeline to
+    // ensure all shared memory accesses are static. Note that both pipelines
+    // have even length meaning that the next iteration will always start at
+    // index 0.
+
+  #pragma unroll
+    for (int pipe = 0; pipe < stages;) {
+  #pragma unroll
+      for (int k = 0; k < b_sh_wr_iters; k++) {
+        fetch_to_registers(k + 1, pipe % stages);
+        fetch_scales_to_registers(k + 1, pipe);
+        if (k == b_sh_wr_iters - 2) {
+          fetch_to_shared((pipe + stages - 1) % stages, pipe,
+                          slice_iters >= stages);
+          pipe++;
+          wait_for_stage();
+          init_same_group(pipe % stages);
+        }
+        matmul(k);
+      }
+      slice_iters--;
+      if (slice_iters == 0) {
+        break;
+      }
+    }
+
+    a_gl_rd += a_gl_rd_delta_o * stages;
+    slice_k_start += tb_k * stages;
+    slice_k_start_shared_fetch += tb_k * stages;
+
+    if constexpr (has_act_order) {
+      int first_group_id = g_idx[slice_k_start];
+      int last_g_idx = slice_k_start + stages * tb_k * 2;
+      if (last_g_idx >= prob_k) {
+        last_g_idx = prob_k - 1;
+      }
+      int last_group_id = g_idx[last_g_idx];
+      if (last_group_id >= sh_first_group_id + sh_num_groups) {
+        fetch_scales_to_shared(false, first_group_id, last_group_id);
+        __syncthreads();
+      }
+    }
+
+    // Process results and, if necessary, proceed to the next column slice.
+    // While this pattern may not be the most readable, other ways of writing
+    // the loop seemed to result in noticeably worse performance after
+    // compilation.
+    if (slice_iters == 0) {
+      cp_async_wait<0>();
+      bool last = slice_idx == slice_count - 1;
+      // For per-column scales, we only fetch them here in the final step before
+      // write-out
+      if constexpr (!has_act_order && group_blocks == -1) {
+        if constexpr (num_bits == 8) {
+          if (s_sh_wr_pred) {
+            cp_async4(&sh_s[s_sh_wr], &scales_ptr[s_gl_rd]);
+          }
+          cp_async_fence();
+        } else {
+          if (last) {
+            if (s_sh_wr_pred) {
+              cp_async4(&sh_s[s_sh_wr], &scales_ptr[s_gl_rd]);
+            }
+            cp_async_fence();
+          }
+        }
+      }
+
+      thread_block_reduce();
+      if constexpr (!has_act_order && group_blocks == -1) {
+        if constexpr (num_bits == 8) {
+          cp_async_wait<0>();
+          __syncthreads();
+          if (threadIdx.x / 32 < thread_n_blocks / 4) {
+            reinterpret_cast<int4*>(&frag_s)[0] = sh_s[s_sh_rd + 0];
+            reinterpret_cast<int4*>(&frag_s)[1] = sh_s[s_sh_rd + 4];
+          }
+
+        } else {
+          if (last) {
+            cp_async_wait<0>();
+            __syncthreads();
+            if (threadIdx.x / 32 < thread_n_blocks / 4) {
+              reinterpret_cast<int4*>(&frag_s)[0] = sh_s[s_sh_rd + 0];
+              reinterpret_cast<int4*>(&frag_s)[1] = sh_s[s_sh_rd + 4];
+            }
+          }
+        }
+      }
+
+      // For 8-bit channelwise, we apply the scale before the global reduction
+      // that converts the fp32 results to fp16 (so that we avoid possible
+      // overflow in fp16)
+      if constexpr (!has_act_order && group_blocks == -1 && num_bits == 8) {
+        if (threadIdx.x / 32 < thread_n_blocks / 4) {
+  #pragma unroll
+          for (int i = 0; i < thread_m_blocks; i++) {
+  #pragma unroll
+            for (int j = 0; j < 4; j++) {
+              scale_float<scalar_t>(
+                  reinterpret_cast<float*>(&frag_c[i][j][0][0]),
+                  frag_s[j / 2][2 * (j % 2) + 0]);
+              scale_float<scalar_t>(
+                  reinterpret_cast<float*>(&frag_c[i][j][0][2]),
+                  frag_s[j / 2][2 * (j % 2) + 0]);
+
+              scale_float<scalar_t>(
+                  reinterpret_cast<float*>(&frag_c[i][j][1][0]),
+                  frag_s[j / 2][2 * (j % 2) + 1]);
+              scale_float<scalar_t>(
+                  reinterpret_cast<float*>(&frag_c[i][j][1][2]),
+                  frag_s[j / 2][2 * (j % 2) + 1]);
+            }
+          }
+        }
+      }
+
+      if (slice_count > 1) {  // only globally reduce if there is more than one
+                              // block in a slice
+        barrier_acquire(&locks[slice_col], slice_idx);
+        global_reduce(slice_idx == 0, last);
+        barrier_release(&locks[slice_col], last);
+      }
+      if (last)  // only the last block in a slice actually writes the result
+        write_result();
+      slice_row = 0;
+      slice_col_par++;
+      slice_col++;
+      init_slice();
+      if (slice_iters) {
+        a_gl_rd = a_gl_stride * (threadIdx.x / a_gl_rd_delta_o) +
+                  (threadIdx.x % a_gl_rd_delta_o);
+  #pragma unroll
+        for (int i = 0; i < b_sh_wr_iters; i++)
+          B_ptr[i] += b_sh_stride - b_gl_rd_delta_o * k_tiles;
+        if (slice_col == 0) {
+  #pragma unroll
+          for (int i = 0; i < b_sh_wr_iters; i++) B_ptr[i] -= b_gl_stride;
+        }
+
+        // Update slice k/n for scales loading
+        if constexpr (has_act_order) {
+          slice_k_start = tb_k * slice_row;
+          slice_k_finish = slice_k_start + tb_k * slice_iters;
+          slice_k_start_shared_fetch = slice_k_start;
+          slice_n_offset = act_s_col_tb_stride * slice_col;
+
+        } else {
+          s_gl_rd = s_sh_stride * slice_col + threadIdx.x;
+        }
+
+        start_pipes();
+      }
+    }
+  }
+}
+
+  #define __CALL_IF(NUM_BITS, THREAD_M_BLOCKS, THREAD_N_BLOCKS,                \
+                    THREAD_K_BLOCKS, HAS_ACT_ORDER, GROUP_BLOCKS, NUM_THREADS) \
+    else if (num_bits == NUM_BITS && thread_m_blocks == THREAD_M_BLOCKS &&     \
+             thread_n_blocks == THREAD_N_BLOCKS &&                             \
+             thread_k_blocks == THREAD_K_BLOCKS &&                             \
+             has_act_order == HAS_ACT_ORDER && group_blocks == GROUP_BLOCKS && \
+             num_threads == NUM_THREADS) {                                     \
+      cudaFuncSetAttribute(                                                    \
+          Marlin<scalar_t, NUM_BITS, NUM_THREADS, THREAD_M_BLOCKS,             \
+                 THREAD_N_BLOCKS, THREAD_K_BLOCKS, pipe_stages, HAS_ACT_ORDER, \
+                 GROUP_BLOCKS>,                                                \
+          cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem);        \
+      Marlin<scalar_t, NUM_BITS, NUM_THREADS, THREAD_M_BLOCKS,                 \
+             THREAD_N_BLOCKS, THREAD_K_BLOCKS, pipe_stages, HAS_ACT_ORDER,     \
+             GROUP_BLOCKS>                                                     \
+          <<<blocks, NUM_THREADS, max_shared_mem, stream>>>(                   \
+              A_ptr, B_ptr, C_ptr, s_ptr, g_idx_ptr, num_groups, prob_m,       \
+              prob_n, prob_k, locks);                                          \
+    }
+
+typedef struct {
+  int thread_k;
+  int thread_n;
+  int num_threads;
+} thread_config_t;
+
+typedef struct {
+  int max_m_blocks;
+  thread_config_t tb_cfg;
+} exec_config_t;
+
+thread_config_t small_batch_thread_configs[] = {
+    // Ordered by priority
+
+    // thread_k, thread_n, num_threads
+    {128, 128, 256},
+    {64, 128, 128},
+    {128, 64, 128},
+};
+
+thread_config_t large_batch_thread_configs[] = {
+    // Ordered by priority
+
+    // thread_k, thread_n, num_threads
+    {64, 256, 256},
+    {64, 128, 128},
+    {128, 64, 128},
+
+};
+
+int get_scales_cache_size(thread_config_t const& th_config, int prob_m,
+                          int prob_n, int prob_k, int num_bits, int group_size,
+                          bool has_act_order, bool is_k_full) {
+  bool cache_scales_chunk = has_act_order && !is_k_full;
+
+  int tb_n = th_config.thread_n;
+  int tb_k = th_config.thread_k;
+
+  // Get max scale groups per thread-block
+  int tb_groups;
+  if (group_size == -1) {
+    tb_groups = 1;
+  } else if (group_size == 0) {
+    tb_groups = div_ceil(tb_k, 32);  // Worst case is 32 group size
+  } else {
+    tb_groups = div_ceil(tb_k, group_size);
+  }
+
+  if (cache_scales_chunk) {
+    int load_groups =
+        tb_groups * pipe_stages * 2;     // Chunk size is 2x pipeline over dim K
+    load_groups = max(load_groups, 32);  // We load at least 32 scale groups
+    return load_groups * tb_n * 2;
+
+  } else {
+    int tb_scales = tb_groups * tb_n * 2;
+
+    return tb_scales * pipe_stages;
+  }
+}
+
+bool is_valid_cache_size(thread_config_t const& th_config, int max_m_blocks,
+                         int prob_m, int prob_n, int prob_k, int num_bits,
+                         int scales_cache_size, int max_shared_mem) {
+  int pack_factor = 32 / num_bits;
+
+  // Get B size
+  int tb_k = th_config.thread_k;
+  int tb_n = th_config.thread_n;
+
+  int b_size = (tb_k * tb_n / pack_factor) * 4;
+
+  // Get A size
+  int m_blocks = div_ceil(prob_m, 16);
+  int tb_max_m = 16;
+
+  while (true) {
+    if (m_blocks >= max_m_blocks) {
+      tb_max_m *= max_m_blocks;
+      break;
+    }
+
+    max_m_blocks--;
+    if (max_m_blocks == 0) {
+      TORCH_CHECK(false, "Unexpected m_blocks = ", m_blocks);
+    }
+  }
+
+  int a_size = (tb_max_m * tb_k) * 2;
+
+  float pipe_size = (a_size + b_size) * pipe_stages;
+
+  TORCH_CHECK(max_shared_mem / 2 > scales_cache_size);  // Sanity
+
+  return pipe_size < 0.95f * (max_shared_mem - scales_cache_size);
+}
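As a standalone sanity check on the shared-memory arithmetic in is_valid_cache_size() above, here is the same computation re-run for one hypothetical configuration (thread_k=64, thread_n=256, num_bits=4, max_m_blocks=4, pipe_stages=4; the values are illustrative):

    #include <cstdio>

    int main() {
        int pack_factor = 32 / 4;                   // 8 4-bit weights per int32
        int b_size = (64 * 256 / pack_factor) * 4;  // B bytes per stage = 8192
        int a_size = (16 * 4) * 64 * 2;             // A bytes per stage = 8192
        float pipe_size = (float)(a_size + b_size) * 4;  // 4 stages = 65536 B
        // The config is only accepted if this fits into 95% of the shared
        // memory left over after the scales cache, which is why the caller
        // falls back to smaller max_m_blocks when it does not.
        printf("pipeline needs %.0f bytes of shared memory\n", pipe_size);
        return 0;
    }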
int prob_n, int prob_k, int num_bits, + int group_size, bool has_act_order, bool is_k_full, + int max_shared_mem) { + // Sanity + if (th_config.thread_k == -1 || th_config.thread_n == -1 || + th_config.num_threads == -1) { + return false; + } + + // Verify K/N are divisible by thread K/N + if (prob_k % th_config.thread_k != 0 || prob_n % th_config.thread_n != 0) { + return false; + } + + // Verify min for thread K/N + if (th_config.thread_n < min_thread_n || th_config.thread_k < min_thread_k) { + return false; + } + + // num_threads must be at least 128 (= 4 warps) + if (th_config.num_threads < 128) { + return false; + } + + // Determine cache for scales + int scales_cache_size = + get_scales_cache_size(th_config, prob_m, prob_n, prob_k, num_bits, + group_size, has_act_order, is_k_full); + + // Check that pipeline fits into cache + if (!is_valid_cache_size(th_config, max_m_blocks, prob_m, prob_n, prob_k, + num_bits, scales_cache_size, max_shared_mem)) { + return false; + } + + return true; +} + +exec_config_t determine_thread_config(int prob_m, int prob_n, int prob_k, + int num_bits, int group_size, + bool has_act_order, bool is_k_full, + int max_shared_mem) { + int max_m_blocks = 4; + while (max_m_blocks > 0) { + if (prob_m <= 16) { + for (auto th_config : small_batch_thread_configs) { + if (is_valid_config(th_config, max_m_blocks, prob_m, prob_n, prob_k, + num_bits, group_size, has_act_order, is_k_full, + max_shared_mem)) { + return exec_config_t{max_m_blocks, th_config}; + } + } + } else { + for (auto th_config : large_batch_thread_configs) { + if (is_valid_config(th_config, max_m_blocks, prob_m, prob_n, prob_k, + num_bits, group_size, has_act_order, is_k_full, + max_shared_mem)) { + return exec_config_t{max_m_blocks, th_config}; + } + } + } + + max_m_blocks--; // Process less M blocks per invocation to reduce cache + // usage + } + + return exec_config_t{0, {-1, -1, -1}}; +} + + #define CALL_IF(NUM_BITS, N_BLOCKS, K_BLOCKS, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 1, N_BLOCKS, K_BLOCKS, true, 0, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 2, N_BLOCKS, K_BLOCKS, true, 0, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 3, N_BLOCKS, K_BLOCKS, true, 0, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 4, N_BLOCKS, K_BLOCKS, true, 0, NUM_THREADS) \ + \ + __CALL_IF(NUM_BITS, 1, N_BLOCKS, K_BLOCKS, false, -1, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 1, N_BLOCKS, K_BLOCKS, false, 2, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 1, N_BLOCKS, K_BLOCKS, false, 4, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 1, N_BLOCKS, K_BLOCKS, false, 8, NUM_THREADS) \ + \ + __CALL_IF(NUM_BITS, 2, N_BLOCKS, K_BLOCKS, false, -1, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 2, N_BLOCKS, K_BLOCKS, false, 2, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 2, N_BLOCKS, K_BLOCKS, false, 4, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 2, N_BLOCKS, K_BLOCKS, false, 8, NUM_THREADS) \ + \ + __CALL_IF(NUM_BITS, 3, N_BLOCKS, K_BLOCKS, false, -1, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 3, N_BLOCKS, K_BLOCKS, false, 2, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 3, N_BLOCKS, K_BLOCKS, false, 4, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 3, N_BLOCKS, K_BLOCKS, false, 8, NUM_THREADS) \ + \ + __CALL_IF(NUM_BITS, 4, N_BLOCKS, K_BLOCKS, false, -1, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 4, N_BLOCKS, K_BLOCKS, false, 2, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 4, N_BLOCKS, K_BLOCKS, false, 4, NUM_THREADS) \ + __CALL_IF(NUM_BITS, 4, N_BLOCKS, K_BLOCKS, false, 8, NUM_THREADS) + +template +void marlin_mm_f16i4(const void* A, const void* B, void* C, void* s, + void* g_idx, void* perm, void* a_tmp, int prob_m, + int prob_n, int prob_k, 
void* workspace, int num_bits, + bool has_act_order, bool is_k_full, int num_groups, + int group_size, int dev, cudaStream_t stream, int thread_k, + int thread_n, int sms, int max_par) { + TORCH_CHECK(num_bits == 4 || num_bits == 8, + "num_bits must be 4 or 8. Got = ", num_bits); + TORCH_CHECK(prob_m > 0 && prob_n > 0 && prob_k > 0, "Invalid MNK = [", prob_m, + ", ", prob_n, ", ", prob_k, "]"); + + int tot_m = prob_m; + int tot_m_blocks = div_ceil(tot_m, 16); + int pad = 16 * tot_m_blocks - tot_m; + + if (sms == -1) { + cudaDeviceGetAttribute(&sms, cudaDevAttrMultiProcessorCount, dev); + } + + int max_shared_mem = 0; + cudaDeviceGetAttribute(&max_shared_mem, + cudaDevAttrMaxSharedMemoryPerBlockOptin, dev); + TORCH_CHECK(max_shared_mem > 0); + + // Set thread config + exec_config_t exec_cfg; + if (thread_k != -1 && thread_n != -1) { + // User-defined config + exec_cfg = + exec_config_t{4, thread_config_t{thread_k, thread_n, default_threads}}; + } else { + // Auto config + exec_cfg = + determine_thread_config(prob_m, prob_n, prob_k, num_bits, group_size, + has_act_order, is_k_full, max_shared_mem); + } + + TORCH_CHECK(exec_cfg.max_m_blocks > 0 && + is_valid_config(exec_cfg.tb_cfg, exec_cfg.max_m_blocks, + prob_m, prob_n, prob_k, num_bits, group_size, + has_act_order, is_k_full, max_shared_mem), + "Invalid thread config: max_m_blocks = ", exec_cfg.max_m_blocks, + ", thread_k = ", exec_cfg.tb_cfg.thread_k, + ", thread_n = ", exec_cfg.tb_cfg.thread_n, + ", num_threads = ", exec_cfg.tb_cfg.num_threads, " for MKN = [", + prob_m, ", ", prob_k, ", ", prob_n, "] and num_bits = ", num_bits, + ", group_size = ", group_size, + ", has_act_order = ", has_act_order, ", is_k_full = ", is_k_full, + ", max_shared_mem = ", max_shared_mem); + + int num_threads = exec_cfg.tb_cfg.num_threads; + thread_k = exec_cfg.tb_cfg.thread_k; + thread_n = exec_cfg.tb_cfg.thread_n; + + int thread_k_blocks = thread_k / 16; + int thread_n_blocks = thread_n / 16; + + int blocks = sms; + + TORCH_CHECK(prob_n % thread_n == 0, "prob_n = ", prob_n, + " is not divisible by thread_n = ", thread_n); + TORCH_CHECK(prob_k % thread_k == 0, "prob_k = ", prob_k, + " is not divisible by thread_k = ", thread_k); + + int group_blocks = 0; + if (has_act_order) { + if (is_k_full) { + TORCH_CHECK(group_size != -1); + group_blocks = group_size / 16; + TORCH_CHECK(prob_k % group_blocks == 0, "prob_k = ", prob_k, + " is not divisible by group_blocks = ", group_blocks); + } else { + TORCH_CHECK(group_size == 0); + group_blocks = 0; + } + + } else { + if (group_size == -1) { + group_blocks = -1; + } else { + group_blocks = group_size / 16; + TORCH_CHECK(prob_k % group_blocks == 0, "prob_k = ", prob_k, + " is not divisible by group_blocks = ", group_blocks); + } + } + + const int4* A_ptr = (const int4*)A; + const int4* B_ptr = (const int4*)B; + int4* C_ptr = (int4*)C; + const int4* s_ptr = (const int4*)s; + const int* g_idx_ptr = (const int*)g_idx; + const int* perm_ptr = (const int*)perm; + int4* a_tmp_ptr = (int4*)a_tmp; + + int* locks = (int*)workspace; + + if (has_act_order) { + // Permute A columns + int block_rows = div_ceil(prob_m, blocks); + permute_cols_kernel<<>>( + A_ptr, perm_ptr, a_tmp_ptr, prob_m, prob_k, block_rows); + A_ptr = a_tmp_ptr; + } + + // If we have a full K, then we can run the non-act-order version of Marlin + // (since the weight rows are reordered by increasing group ids, and by having + // a full K, we have full original groups) + if (is_k_full) { + has_act_order = false; + } + + // Main loop + for (int i = 0; i < 
tot_m_blocks; i += exec_cfg.max_m_blocks) { + int thread_m_blocks = tot_m_blocks - i; + prob_m = tot_m - 16 * i; + int par = 1; + if (thread_m_blocks > exec_cfg.max_m_blocks) { + // Note that parallel > 1 currently only works for inputs without any + // padding + par = (16 * thread_m_blocks - pad) / (16 * exec_cfg.max_m_blocks); + if (par > max_par) par = max_par; + prob_m = (16 * exec_cfg.max_m_blocks) * par; + i += exec_cfg.max_m_blocks * (par - 1); + thread_m_blocks = exec_cfg.max_m_blocks; + } + + // Define kernel configurations + if (false) { + } + CALL_IF(4, 32, 2, 256) + CALL_IF(4, 16, 4, 256) + CALL_IF(4, 8, 8, 256) + CALL_IF(4, 8, 4, 128) + CALL_IF(4, 4, 8, 128) + CALL_IF(8, 32, 2, 256) + CALL_IF(8, 16, 4, 256) + CALL_IF(8, 8, 8, 256) + CALL_IF(8, 8, 4, 128) + CALL_IF(8, 4, 8, 128) + else { + TORCH_CHECK(false, "Unsupported shapes: MNK = [" + str(prob_m) + ", " + + str(prob_n) + ", " + str(prob_k) + "]" + + ", has_act_order = " + str(has_act_order) + + ", num_groups = " + str(num_groups) + + ", group_size = " + str(group_size) + + ", thread_m_blocks = " + str(thread_m_blocks) + + ", thread_n_blocks = " + str(thread_n_blocks) + + ", thread_k_blocks = " + str(thread_k_blocks)); + } + + A_ptr += 16 * thread_m_blocks * (prob_k / 8) * par; + C_ptr += 16 * thread_m_blocks * (prob_n / 8) * par; + } +} + +} // namespace gptq_marlin + +torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight, + torch::Tensor& b_scales, torch::Tensor& g_idx, + torch::Tensor& perm, torch::Tensor& workspace, + int64_t num_bits, int64_t size_m, int64_t size_n, + int64_t size_k, bool is_k_full) { + // Verify num_bits + TORCH_CHECK(num_bits == 4 || num_bits == 8, + "num_bits must be 4 or 8. Got = ", num_bits); + int pack_factor = 32 / num_bits; + + // Verify A + TORCH_CHECK(a.size(0) == size_m, "Shape mismatch: a.size(0) = ", a.size(0), + ", size_m = ", size_m); + TORCH_CHECK(a.size(1) == size_k, "Shape mismatch: a.size(1) = ", a.size(1), + ", size_k = ", size_k); + + // Verify B + TORCH_CHECK(size_k % gptq_marlin::tile_size == 0, "size_k = ", size_k, + " is not divisible by tile_size = ", gptq_marlin::tile_size); + TORCH_CHECK((size_k / gptq_marlin::tile_size) == b_q_weight.size(0), + "Shape mismatch: b_q_weight.size(0) = ", b_q_weight.size(0), + ", size_k = ", size_k, ", tile_size = ", gptq_marlin::tile_size); + TORCH_CHECK(b_q_weight.size(1) % gptq_marlin::tile_size == 0, + "b_q_weight.size(1) = ", b_q_weight.size(1), + " is not divisible by tile_size = ", gptq_marlin::tile_size); + int actual_size_n = + (b_q_weight.size(1) / gptq_marlin::tile_size) * pack_factor; + TORCH_CHECK(size_n == actual_size_n, "size_n = ", size_n, + ", actual_size_n = ", actual_size_n); + + // Verify device and strides + TORCH_CHECK(a.device().is_cuda(), "A is not on GPU"); + TORCH_CHECK(a.is_contiguous(), "A is not contiguous"); + + TORCH_CHECK(b_q_weight.device().is_cuda(), "b_q_weight is not on GPU"); + TORCH_CHECK(b_q_weight.is_contiguous(), "b_q_weight is not contiguous"); + + TORCH_CHECK(b_scales.device().is_cuda(), "b_scales is not on GPU"); + TORCH_CHECK(b_scales.is_contiguous(), "b_scales is not contiguous"); + + TORCH_CHECK(g_idx.device().is_cuda(), "g_idx is not on GPU"); + TORCH_CHECK(g_idx.is_contiguous(), "g_idx is not contiguous"); + + TORCH_CHECK(perm.device().is_cuda(), "perm is not on GPU"); + TORCH_CHECK(perm.is_contiguous(), "perm is not contiguous"); + + // Alloc buffers + const at::cuda::OptionalCUDAGuard device_guard(device_of(a)); + auto options = 
torch::TensorOptions().dtype(a.dtype()).device(a.device()); + torch::Tensor c = torch::empty({size_m, size_n}, options); + torch::Tensor a_tmp = torch::empty({size_m, size_k}, options); + + // thread_k: `k` size of a thread_tile in `weights` (can usually be left as + // auto -1) + int thread_k = -1; + // thread_n: `n` size of a thread_tile in `weights` (can usually be left as + // auto -1) + int thread_n = -1; + // sms: number of SMs to use for the kernel (can usually be left as auto -1) + int sms = -1; + + // Verify g_idx and perm + TORCH_CHECK((g_idx.size(0) == 0 && perm.size(0) == 0) || + (g_idx.size(0) == size_k && perm.size(0) == size_k), + "Unexpected g_idx.size(0) = ", g_idx.size(0), + " and perm.size(0) = ", perm.size(0), + ", where size_k = ", size_k); + + // Detect groupsize and act_order + int num_groups = -1; + int group_size = -1; + bool has_act_order = g_idx.size(0) != 0; + + int b_rank = b_scales.sizes().size(); + TORCH_CHECK(b_rank == 2, "b_scales rank = ", b_rank, " is not 2"); + TORCH_CHECK(b_scales.size(1) == size_n, "b_scales dim 1 = ", b_scales.size(1), + " is not size_n = ", size_n); + num_groups = b_scales.size(0); + + if (has_act_order) { + if (is_k_full) { + TORCH_CHECK(num_groups > 1, "For act_order, num_groups must be > 1"); + TORCH_CHECK(size_k % num_groups == 0, "size_k = ", size_k, + ", is not divisible by num_groups = ", num_groups); + group_size = size_k / num_groups; + } else { + group_size = 0; + } + + } else { + if (num_groups > 1) { + TORCH_CHECK( + size_k % num_groups == 0, "size_k = ", size_k, + ", is not divisible by b_scales.size(0) = ", b_scales.size(0)); + group_size = size_k / num_groups; + } else { + group_size = -1; + } + } + + // Verify workspace size + TORCH_CHECK( + size_n % gptq_marlin::min_thread_n == 0, "size_n = ", size_n, + ", is not divisible by min_thread_n = ", gptq_marlin::min_thread_n); + int min_workspace_size = + (size_n / gptq_marlin::min_thread_n) * gptq_marlin::max_par; + TORCH_CHECK(workspace.numel() >= min_workspace_size, + "workspace.numel = ", workspace.numel(), + " is below min_workspace_size = ", min_workspace_size); + + int dev = a.get_device(); + if (a.scalar_type() == at::ScalarType::Half) { + gptq_marlin::marlin_mm_f16i4( + a.data_ptr(), b_q_weight.data_ptr(), c.data_ptr(), + b_scales.data_ptr(), g_idx.data_ptr(), perm.data_ptr(), + a_tmp.data_ptr(), size_m, size_n, size_k, + workspace.data_ptr(), num_bits, has_act_order, is_k_full, num_groups, + group_size, dev, at::cuda::getCurrentCUDAStream(dev), thread_k, + thread_n, sms, gptq_marlin::max_par); + } else if (a.scalar_type() == at::ScalarType::BFloat16) { + gptq_marlin::marlin_mm_f16i4( + a.data_ptr(), b_q_weight.data_ptr(), + c.data_ptr(), b_scales.data_ptr(), + g_idx.data_ptr(), perm.data_ptr(), a_tmp.data_ptr(), + size_m, size_n, size_k, workspace.data_ptr(), num_bits, has_act_order, + is_k_full, num_groups, group_size, dev, + at::cuda::getCurrentCUDAStream(dev), thread_k, thread_n, sms, + gptq_marlin::max_par); + } else { + TORCH_CHECK(false, "gpt_marlin_gemm only supports bfloat16 and float16"); + } + + return c; +} + +#endif diff --git a/ktransformers/ktransformers_ext/cuda/gptq_marlin/gptq_marlin.cuh b/ktransformers/ktransformers_ext/cuda/gptq_marlin/gptq_marlin.cuh new file mode 100644 index 0000000..66a5920 --- /dev/null +++ b/ktransformers/ktransformers_ext/cuda/gptq_marlin/gptq_marlin.cuh @@ -0,0 +1,80 @@ +// Adapted from +// https://github.com/vllm-project/vllm/tree/main/csrc/quantization/gptq_marlin +// Copyrigth 2024 The vLLM team. 
+// Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
+#pragma once
+
+#include <torch/extension.h>
+
+#include <ATen/cuda/CUDAContext.h>
+#include <c10/cuda/CUDAGuard.h>
+#include <cuda.h>
+#include <cuda_fp16.h>
+#include <cuda_runtime.h>
+#include <iostream>
+
+namespace gptq_marlin {
+
+// 8 warps are a good choice since every SM has 4 schedulers and having more
+// than 1 warp per schedule allows some more latency hiding. At the same time,
+// we want relatively few warps to have many registers per warp and small tiles.
+static constexpr int default_threads = 256;
+
+static constexpr int pipe_stages =
+    4;  // 4 pipeline stages fit into shared memory
+
+static constexpr int min_thread_n = 64;
+static constexpr int min_thread_k = 64;
+
+static constexpr int tile_size = 16;
+static constexpr int max_par = 16;
+
+template <typename T, int n>
+struct Vec {
+  T elems[n];
+  __device__ T& operator[](int i) { return elems[i]; }
+};
+
+using I4 = Vec<int, 4>;
+
+constexpr int div_ceil(int a, int b) { return (a + b - 1) / b; }
+
+#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
+// No support for async
+#else
+
+__device__ inline void cp_async4_pred(void* smem_ptr, const void* glob_ptr,
+                                      bool pred = true) {
+  const int BYTES = 16;
+  uint32_t smem = static_cast<uint32_t>(__cvta_generic_to_shared(smem_ptr));
+  asm volatile(
+      "{\n"
+      "   .reg .pred p;\n"
+      "   setp.ne.b32 p, %0, 0;\n"
+      "   @p cp.async.cg.shared.global [%1], [%2], %3;\n"
+      "}\n" ::"r"((int)pred),
+      "r"(smem), "l"(glob_ptr), "n"(BYTES));
+}
+
+__device__ inline void cp_async4(void* smem_ptr, const void* glob_ptr) {
+  const int BYTES = 16;
+  uint32_t smem = static_cast<uint32_t>(__cvta_generic_to_shared(smem_ptr));
+  asm volatile(
+      "{\n"
+      "   cp.async.cg.shared.global [%0], [%1], %2;\n"
+      "}\n" ::"r"(smem),
+      "l"(glob_ptr), "n"(BYTES));
+}
+
+__device__ inline void cp_async_fence() {
+  asm volatile("cp.async.commit_group;\n" ::);
+}
+
+template <int n>
+__device__ inline void cp_async_wait() {
+  asm volatile("cp.async.wait_group %0;\n" ::"n"(n));
+}
+
+#endif
+
+}  // namespace gptq_marlin
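The feasibility test in is_valid_cache_size above packs several unit conversions into one inequality; the following is a rough Python restatement of the same arithmetic, using the constants just defined in gptq_marlin.cuh (pipe_stages = 4, 16-row tiles). The standalone function and its name are ours, for illustration only:

    PIPE_STAGES = 4  # mirrors gptq_marlin::pipe_stages

    def pipeline_fits(thread_k, thread_n, prob_m, max_m_blocks, num_bits,
                      scales_cache_bytes, max_shared_mem):
        # B tile: thread_k x thread_n quantized weights, 32 // num_bits
        # values packed per int32, 4 bytes each.
        pack_factor = 32 // num_bits
        b_bytes = (thread_k * thread_n // pack_factor) * 4
        # A tile: fp16 (2 bytes), up to 16 * max_m_blocks rows, capped by
        # the actual number of 16-row blocks in prob_m.
        m_blocks = -(-prob_m // 16)  # div_ceil(prob_m, 16)
        tb_max_m = 16 * min(m_blocks, max_m_blocks)
        a_bytes = tb_max_m * thread_k * 2
        # Every pipeline stage holds one A tile and one B tile; all stages
        # plus the scale cache must fit in 95% of shared memory.
        pipe_bytes = (a_bytes + b_bytes) * PIPE_STAGES
        return pipe_bytes < 0.95 * (max_shared_mem - scales_cache_bytes)

determine_thread_config then simply walks the priority-ordered config tables with max_m_blocks counting down from 4 and returns the first candidate that passes this test together with the divisibility and minimum-size checks in is_valid_config.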
 diff --git a/ktransformers/ktransformers_ext/cuda/gptq_marlin/gptq_marlin_dtypes.cuh b/ktransformers/ktransformers_ext/cuda/gptq_marlin/gptq_marlin_dtypes.cuh
new file mode 100644
index 0000000..b8babfb
--- /dev/null
+++ b/ktransformers/ktransformers_ext/cuda/gptq_marlin/gptq_marlin_dtypes.cuh
@@ -0,0 +1,80 @@
+// Adapted from
+// https://github.com/vllm-project/vllm/tree/main/csrc/quantization/gptq_marlin
+// Copyright 2024 The vLLM team.
+// Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
+#ifndef _data_types_cuh
+#define _data_types_cuh
+#include "gptq_marlin.cuh"
+#include <cuda_fp16.h>
+#include <cuda_bf16.h>
+
+namespace gptq_marlin {
+
+template <typename scalar_t>
+class ScalarType {};
+
+template <>
+class ScalarType<half> {
+ public:
+  using scalar_t = half;
+  using scalar_t2 = half2;
+
+  // Matrix fragments for tensor core instructions; their precise layout is
+  // documented here:
+  // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#matrix-fragments-for-mma-m16n8k16-with-floating-point-type
+  using FragA = Vec<half2, 4>;
+  using FragB = Vec<half2, 2>;
+  using FragC = Vec<float, 4>;
+  using FragS = Vec<half2, 1>;
+
+  static __device__ float inline num2float(const half x) {
+    return __half2float(x);
+  }
+
+  static __device__ half2 inline num2num2(const half x) {
+    return __half2half2(x);
+  }
+
+  static __device__ half2 inline nums2num2(const half x1, const half x2) {
+    return __halves2half2(x1, x2);
+  }
+
+  static __host__ __device__ half inline float2num(const float x) {
+    return __float2half(x);
+  }
+};
+
+template <>
+class ScalarType<nv_bfloat16> {
+ public:
+  using scalar_t = nv_bfloat16;
+  using scalar_t2 = nv_bfloat162;
+
+  using FragA = Vec<nv_bfloat162, 4>;
+  using FragB = Vec<nv_bfloat162, 2>;
+  using FragC = Vec<float, 4>;
+  using FragS = Vec<nv_bfloat162, 1>;
+
+#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
+  static __device__ float inline num2float(const nv_bfloat16 x) {
+    return __bfloat162float(x);
+  }
+
+  static __device__ nv_bfloat162 inline num2num2(const nv_bfloat16 x) {
+    return __bfloat162bfloat162(x);
+  }
+
+  static __device__ nv_bfloat162 inline nums2num2(const nv_bfloat16 x1,
+                                                  const nv_bfloat16 x2) {
+    return __halves2bfloat162(x1, x2);
+  }
+
+  static __host__ __device__ nv_bfloat16 inline float2num(const float x) {
+    return __float2bfloat16(x);
+  }
+#endif
+};
+
+}  // namespace gptq_marlin
+
+#endif
 diff --git a/ktransformers/ktransformers_ext/cuda/gptq_marlin/ops.h b/ktransformers/ktransformers_ext/cuda/gptq_marlin/ops.h
new file mode 100644
index 0000000..47cefea
--- /dev/null
+++ b/ktransformers/ktransformers_ext/cuda/gptq_marlin/ops.h
@@ -0,0 +1,24 @@
+/**
+ * @Description :
+ * @Author : Azure
+ * @Date : 2024-07-22 09:27:55
+ * @Version : 1.0.0
+ * @LastEditors : Azure
+ * @LastEditTime : 2024-07-26 08:35:00
+ * @Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
+**/ +#pragma once + +#include +#include +#include + +torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight, + torch::Tensor& b_scales, torch::Tensor& g_idx, + torch::Tensor& perm, torch::Tensor& workspace, + int64_t num_bits, int64_t size_m, int64_t size_n, + int64_t size_k, bool is_k_full); + +// torch::Tensor gptq_marlin_repack(torch::Tensor& b_q_weight, torch::Tensor& perm, +// int64_t size_k, int64_t size_n, +// int64_t num_bits); \ No newline at end of file diff --git a/ktransformers/ktransformers_ext/cuda/setup.py b/ktransformers/ktransformers_ext/cuda/setup.py new file mode 100644 index 0000000..baf0808 --- /dev/null +++ b/ktransformers/ktransformers_ext/cuda/setup.py @@ -0,0 +1,18 @@ + +from setuptools import setup, Extension +from torch.utils import cpp_extension +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +# setup marlin gemm +setup(name='KTransformersOps', + ext_modules=[ + CUDAExtension('KTransformersOps', [ + 'custom_gguf/dequant.cu', + 'binding.cpp', + 'gptq_marlin/gptq_marlin.cu', + # 'gptq_marlin_repack.cu', + ]) + ], + cmdclass={'build_ext': BuildExtension +}) + diff --git a/ktransformers/ktransformers_ext/examples/test_linear.py b/ktransformers/ktransformers_ext/examples/test_linear.py new file mode 100644 index 0000000..6cb8d0c --- /dev/null +++ b/ktransformers/ktransformers_ext/examples/test_linear.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python +# coding=utf-8 +''' +Description : +Author : chenht2022 +Date : 2024-07-25 10:32:05 +Version : 1.0.0 +LastEditors : chenht2022 +LastEditTime : 2024-07-25 10:34:00 +Copyright (c) 2024 by KVCache.AI, All Rights Reserved. +''' +import os, sys +import time +sys.path.append(os.path.dirname(__file__) + '/../build') +import cpuinfer_ext +import torch + +with torch.inference_mode(mode=True): + input_size = 16384 + output_size = 5120 + stride = 32 + proj_type = 1 # ggml_type::GGML_TYPE_F16 + hidden_type = 1 # ggml_type::GGML_TYPE_F16 + layer_num = 10 + CPUInfer = cpuinfer_ext.CPUInfer(48) + validation_iter = 100 + warm_up_iter = 1000 + test_iter = 10000 + + linears = [] + projs = [] + for _ in range(layer_num): + proj = torch.randn((output_size, input_size), dtype=torch.float16, device = "cuda").to("cpu").contiguous() + config = cpuinfer_ext.linear.LinearConfig(input_size, output_size, stride, proj.data_ptr(), proj_type, hidden_type) + linear = cpuinfer_ext.linear.Linear(config) + projs.append(proj) + linears.append(linear) + + # validation + for i in range(validation_iter): + linear = linears[i % layer_num] + input = torch.randn((1, input_size), dtype=torch.float16).contiguous() + output = torch.empty((1, output_size), dtype=torch.float16).contiguous() + input = input / 100 + + CPUInfer.submit(linear.forward, input.data_ptr(), output.data_ptr()) + CPUInfer.sync() + # print('cpuinfer output', output) + + proj = projs[i%layer_num] + t_output = torch.mm(input, proj.t()) + # print('torch output', t_output) + + diff = torch.mean(torch.abs(output - t_output)) / torch.mean(torch.abs(t_output)) + print('diff = ', diff) + assert(diff < 0.001) + + # warm up + for i in range(warm_up_iter): + linear = linears[i % layer_num] + input = torch.randn((1, input_size), dtype=torch.float16).contiguous() + output = torch.empty((1, output_size), dtype=torch.float16).contiguous() + input = input / 100 + CPUInfer.submit(linear.forward, input.data_ptr(), output.data_ptr()) + CPUInfer.sync() + + # test + total_time = 0 + for i in range(test_iter): + linear = linears[i % layer_num] + input = torch.randn((1, 
input_size), dtype=torch.float16).contiguous() + output = torch.empty((1, output_size), dtype=torch.float16).contiguous() + input = input / 100 + start = time.perf_counter() + CPUInfer.submit(linear.forward, input.data_ptr(), output.data_ptr()) + CPUInfer.sync() + end = time.perf_counter() + total_time += end - start + print('Time: ', total_time) + print('Iteration: ', test_iter) + print('Time per iteration: ', total_time / test_iter) + print('Bandwidth: ', input_size * output_size * 2 * test_iter / total_time / 1000 / 1000 / 1000, 'GB/s') + print("All tasks completed.") \ No newline at end of file diff --git a/ktransformers/ktransformers_ext/examples/test_mlp.py b/ktransformers/ktransformers_ext/examples/test_mlp.py new file mode 100644 index 0000000..d965877 --- /dev/null +++ b/ktransformers/ktransformers_ext/examples/test_mlp.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python +# coding=utf-8 +''' +Description : +Author : chenht2022 +Date : 2024-07-25 10:32:05 +Version : 1.0.0 +LastEditors : chenht2022 +LastEditTime : 2024-07-25 10:34:03 +Copyright (c) 2024 by KVCache.AI, All Rights Reserved. +''' +import os, sys +import time +sys.path.append(os.path.dirname(__file__) + '/../build') +import cpuinfer_ext +import torch + +with torch.inference_mode(mode=True): + hidden_size = 5120 + intermediate_size = 3072 + stride = 32 + gate_type = 1 # ggml_type::GGML_TYPE_F16 + up_type = 1 # ggml_type::GGML_TYPE_F16 + down_type = 1 # ggml_type::GGML_TYPE_F16 + hidden_type = 1 # ggml_type::GGML_TYPE_F16 + layer_num = 10 + CPUInfer = cpuinfer_ext.CPUInfer(48) + validation_iter = 100 + warm_up_iter = 1000 + test_iter = 10000 + + mlps = [] + gate_projs = [] + up_projs = [] + down_projs = [] + for _ in range(layer_num): + gate_proj = torch.randn((intermediate_size, hidden_size), dtype=torch.float16, device = "cuda").to("cpu").contiguous() + up_proj = torch.randn((intermediate_size, hidden_size), dtype=torch.float16, device = "cuda").to("cpu").contiguous() + down_proj = torch.randn((hidden_size, intermediate_size), dtype=torch.float16, device = "cuda").to("cpu").contiguous() + config = cpuinfer_ext.mlp.MLPConfig(hidden_size, intermediate_size, stride, gate_proj.data_ptr(), up_proj.data_ptr(), down_proj.data_ptr(), gate_type, up_type, down_type, hidden_type) + mlp = cpuinfer_ext.mlp.MLP(config) + gate_projs.append(gate_proj) + up_projs.append(up_proj) + down_projs.append(down_proj) + mlps.append(mlp) + + # validation + for i in range(validation_iter): + mlp = mlps[i % layer_num] + input = torch.randn((1, hidden_size), dtype=torch.float16).contiguous() + output = torch.empty((1, hidden_size), dtype=torch.float16).contiguous() + input = input / 100 + + CPUInfer.submit(mlp.forward, input.data_ptr(), output.data_ptr()) + CPUInfer.sync() + # print('cpuinfer output', output) + + def act_fn(x): + return x / (1.0 + torch.exp(-x)) + gate_proj = gate_projs[i%layer_num] + up_proj = up_projs[i%layer_num] + down_proj = down_projs[i%layer_num] + gate_buf = torch.mm(input, gate_proj.t()) + up_buf = torch.mm(input, up_proj.t()) + intermediate = act_fn(gate_buf) * up_buf + t_output = torch.mm(intermediate, down_proj.t()) + # print('torch output', t_output) + + diff = torch.mean(torch.abs(output - t_output)) / torch.mean(torch.abs(t_output)) + print('diff = ', diff) + assert(diff < 0.001) + + # warm up + for i in range(warm_up_iter): + mlp = mlps[i % layer_num] + input = torch.randn((1, hidden_size), dtype=torch.float16).contiguous() + output = torch.empty((1, hidden_size), dtype=torch.float16).contiguous() + input = input / 100 + 
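+        # submit only enqueues the task on the CPUInfer worker pool;
+        # sync() blocks until the workers have written `output` in place.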
CPUInfer.submit(mlp.forward, input.data_ptr(), output.data_ptr()) + CPUInfer.sync() + + # test + total_time = 0 + for i in range(test_iter): + mlp = mlps[i % layer_num] + input = torch.randn((1, hidden_size), dtype=torch.float16).contiguous() + output = torch.empty((1, hidden_size), dtype=torch.float16).contiguous() + input = input / 100 + start = time.time() + CPUInfer.submit(mlp.forward, input.data_ptr(), output.data_ptr()) + CPUInfer.sync() + end = time.time() + total_time += end - start + print('Time: ', total_time) + print('Iteration: ', test_iter) + print('Time per iteration: ', total_time / test_iter) + print('Bandwidth: ', hidden_size * intermediate_size * 3 * 2 * test_iter / total_time / 1024 / 1024 / 1024, 'GB/s') + print("All tasks completed.") \ No newline at end of file diff --git a/ktransformers/ktransformers_ext/examples/test_moe.py b/ktransformers/ktransformers_ext/examples/test_moe.py new file mode 100644 index 0000000..0597811 --- /dev/null +++ b/ktransformers/ktransformers_ext/examples/test_moe.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python +# coding=utf-8 +''' +Description : +Author : chenht2022 +Date : 2024-07-25 10:32:05 +Version : 1.0.0 +LastEditors : chenht2022 +LastEditTime : 2024-07-25 10:34:06 +Copyright (c) 2024 by KVCache.AI, All Rights Reserved. +''' +import os, sys +import time +sys.path.append(os.path.dirname(__file__) + '/../build') +import cpuinfer_ext +import torch + +with torch.inference_mode(mode=True): + expert_num = 10 + hidden_size = 5120 + intermediate_size = 1536 + stride = 32 + group_min_len = 10 + group_max_len = 1024 + gate_type = 1 # ggml_type::GGML_TYPE_F16 + up_type = 1 # ggml_type::GGML_TYPE_F16 + down_type = 1 # ggml_type::GGML_TYPE_F16 + hidden_type = 1 # ggml_type::GGML_TYPE_F16 + n_routed_experts = 6 + qlen = 30 + layer_num = 10 + CPUInfer = cpuinfer_ext.CPUInfer(48) + validation_iter = 100 + warm_up_iter = 1000 + test_iter = 10000 + + moes = [] + gate_projs = [] + up_projs = [] + down_projs = [] + for _ in range(layer_num): + gate_proj = torch.randn((expert_num, intermediate_size, hidden_size), dtype=torch.float16, device = "cuda").to("cpu").contiguous() + up_proj = torch.randn((expert_num, intermediate_size, hidden_size), dtype=torch.float16, device = "cuda").to("cpu").contiguous() + down_proj = torch.randn((expert_num, hidden_size, intermediate_size), dtype=torch.float16, device = "cuda").to("cpu").contiguous() + config = cpuinfer_ext.moe.MOEConfig(expert_num, n_routed_experts, hidden_size, intermediate_size, stride, group_min_len, group_max_len, gate_proj.data_ptr(), up_proj.data_ptr(), down_proj.data_ptr(), gate_type, up_type, down_type, hidden_type) + moe = cpuinfer_ext.moe.MOE(config) + gate_projs.append(gate_proj) + up_projs.append(up_proj) + down_projs.append(down_proj) + moes.append(moe) + + # validation + for i in range(validation_iter): + moe = moes[i % layer_num] + expert_ids = torch.randint(0, expert_num, (qlen, n_routed_experts), dtype=torch.int64).contiguous() + weights = torch.rand((qlen, n_routed_experts), dtype=torch.float32).contiguous() + input = torch.randn((qlen, 1, hidden_size), dtype=torch.float16).contiguous() + output = torch.empty((qlen, 1, hidden_size), dtype=torch.float16).contiguous() + input = input / 100 + + CPUInfer.submit(moe.forward, qlen, n_routed_experts, expert_ids.data_ptr(), weights.data_ptr(), input.data_ptr(), output.data_ptr()) + CPUInfer.sync() + # print('cpuinfer output', output) + + def act_fn(x): + return x / (1.0 + torch.exp(-x)) + t_output = torch.zeros((qlen, 1, hidden_size), 
dtype=torch.float32).contiguous() + gate_proj = gate_projs[i%layer_num] + up_proj = up_projs[i%layer_num] + down_proj = down_projs[i%layer_num] + for token_idx in range(qlen): + for i, expert_id in enumerate(expert_ids[token_idx]): + gate_buf = torch.mm(input[token_idx], gate_proj[expert_id].t()) + up_buf = torch.mm(input[token_idx], up_proj[expert_id].t()) + intermediate = act_fn(gate_buf) * up_buf + expert_output = torch.mm(intermediate, down_proj[expert_id].t()) + t_output[token_idx] += weights[token_idx][i] * expert_output + # print('torch output', t_output) + + diff = torch.mean(torch.abs(output - t_output)) / torch.mean(torch.abs(t_output)) + print('diff = ', diff) + assert(diff < 0.001) + + # warm up + for i in range(warm_up_iter): + moe = moes[i % layer_num] + expert_ids = torch.randint(0, expert_num, (qlen, n_routed_experts), dtype=torch.int64).contiguous() + weights = torch.rand((qlen, n_routed_experts), dtype=torch.float32).contiguous() + input = torch.randn((qlen, hidden_size), dtype=torch.float16).contiguous() + output = torch.empty((qlen, hidden_size), dtype=torch.float16).contiguous() + input = input / 100 + CPUInfer.submit(moe.forward, qlen, n_routed_experts, expert_ids.data_ptr(), weights.data_ptr(), input.data_ptr(), output.data_ptr()) + CPUInfer.sync() + + # test + total_time = 0 + for i in range(test_iter): + moe = moes[i % layer_num] + expert_ids = torch.randint(0, expert_num, (qlen, n_routed_experts), dtype=torch.int64).contiguous() + weights = torch.rand((qlen, n_routed_experts), dtype=torch.float32).contiguous() + input = torch.randn((qlen, hidden_size), dtype=torch.float16).contiguous() + output = torch.empty((qlen, hidden_size), dtype=torch.float16).contiguous() + input = input / 100 + start = time.perf_counter() + CPUInfer.submit(moe.forward, qlen, n_routed_experts, expert_ids.data_ptr(), weights.data_ptr(), input.data_ptr(), output.data_ptr()) + CPUInfer.sync() + end = time.perf_counter() + total_time += end - start + print('Time: ', total_time) + print('Iteration: ', test_iter) + print('Time per iteration: ', total_time / test_iter) + print('Bandwidth: ', hidden_size * intermediate_size * 3 * n_routed_experts * 2 * test_iter / total_time / 1000 / 1000 / 1000, 'GB/s') + print("All tasks completed.") \ No newline at end of file diff --git a/ktransformers/ktransformers_ext/ext_bindings.cpp b/ktransformers/ktransformers_ext/ext_bindings.cpp new file mode 100644 index 0000000..0aeead3 --- /dev/null +++ b/ktransformers/ktransformers_ext/ext_bindings.cpp @@ -0,0 +1,264 @@ +/** + * @Description : + * @Author : chenht2022 + * @Date : 2024-07-22 02:03:22 + * @Version : 1.0.0 + * @LastEditors : chenht2022 + * @LastEditTime : 2024-07-25 10:34:23 + * @Copyright (c) 2024 by KVCache.AI, All Rights Reserved. 
+ **/ +// Python bindings +#include +#include +#include +#include "cpu_backend/cpuinfer.h" +#include "cuda_runtime.h" +#include "device_launch_parameters.h" +#include "llamafile/flags.h" +#include "operators/llamafile/linear.h" +#include "operators/llamafile/mlp.h" +#include "operators/llamafile/moe.h" +#include "pybind11/functional.h" +#include "pybind11/operators.h" +#include "pybind11/pybind11.h" +#include "pybind11/stl.h" + +namespace py = pybind11; +using namespace pybind11::literals; + +// Binding functions for the Linear class +class LinearBindings { + public: + static void bind_forward(CPUInfer& cpuinfer, Linear* linear, py::args args, py::kwargs kwargs) { + auto input = args[0].cast(); + auto output = args[1].cast(); + cpuinfer.submit(&Linear::forward, linear, + (const void*)input, (void*)output); + } + + static void bind_warm_up(CPUInfer& cpuinfer, Linear* linear, py::args args, py::kwargs kwargs) { + cpuinfer.submit(&Linear::warm_up, linear); + } + + static void bind_functions(CPUInfer& cpuinfer, py::object func, py::args args, py::kwargs kwargs) { + auto linear = func.attr("__self__").cast(); + std::string func_name = py::str(func.attr("__func__").attr("__name__")); + + if (func_name == "forward") { + bind_forward(cpuinfer, linear, args, kwargs); + } else if (func_name == "warm_up") { + bind_warm_up(cpuinfer, linear, args, kwargs); + } else { + throw py::value_error("Unsupported function: " + + std::string(func_name)); + } + } +}; + +// Binding functions for the MLP class +class MLPBindings { + public: + static void bind_forward(CPUInfer& cpuinfer, MLP* mlp, py::args args, py::kwargs kwargs) { + auto input = args[0].cast(); + auto output = args[1].cast(); + cpuinfer.submit(&MLP::forward, mlp, + (const void*)input, (void*)output); + } + + static void bind_warm_up(CPUInfer& cpuinfer, MLP* mlp, py::args args, py::kwargs kwargs) { + cpuinfer.submit(&MLP::warm_up, mlp); + } + + static void bind_functions(CPUInfer& cpuinfer, py::object func, py::args args, py::kwargs kwargs) { + auto mlp = func.attr("__self__").cast(); + std::string func_name = py::str(func.attr("__func__").attr("__name__")); + + if (func_name == "forward") { + bind_forward(cpuinfer, mlp, args, kwargs); + } else if (func_name == "warm_up") { + bind_warm_up(cpuinfer, mlp, args, kwargs); + } else { + throw py::value_error("Unsupported function: " + + std::string(func_name)); + } + } +}; + +// Binding functions for the MOE class +class MOEBindings { + public: + static void bind_forward(CPUInfer& cpuinfer, MOE* moe, py::args args, py::kwargs kwargs) { + int qlen = args[0].cast(); + int k = args[1].cast(); + auto expert_ids = args[2].cast(); + auto weights = args[3].cast(); + auto input = args[4].cast(); + auto output = args[5].cast(); + cpuinfer.submit(&MOE::forward, moe, + qlen, k, (const uint64_t*)expert_ids, (const float*)weights, (const void*)input, (void*)output); + } + + static void bind_warm_up(CPUInfer& cpuinfer, MOE* moe, py::args args, py::kwargs kwargs) { + cpuinfer.submit(&MOE::warm_up, moe); + } + + static void bind_functions(CPUInfer& cpuinfer, py::object func, py::args args, py::kwargs kwargs) { + auto moe = func.attr("__self__").cast(); + std::string func_name = py::str(func.attr("__func__").attr("__name__")); + + if (func_name == "forward") { + bind_forward(cpuinfer, moe, args, kwargs); + } else if (func_name == "warm_up") { + bind_warm_up(cpuinfer, moe, args, kwargs); + } else { + throw py::value_error("Unsupported function: " + + std::string(func_name)); + } + } +}; + +struct MOEForwardArgs { + 
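+    // Snapshot of one MOE::forward call, heap-allocated by
+    // submit_with_cuda_stream and handed to cudaLaunchHostFunc so the
+    // CPU-side submit runs only when the CUDA stream reaches this point.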
CPUInfer* cpuinfer; + MOE* moe; + int qlen; + int k; + uint64_t* expert_ids; + float* weights; + void* input; + void* output; +}; + +void submit_moe_forward_with_host_args_ptr(void* host_args_ptr) { + MOEForwardArgs* host_args = (MOEForwardArgs*)host_args_ptr; + host_args->cpuinfer->submit(&MOE::forward, host_args->moe, + host_args->qlen, host_args->k, host_args->expert_ids, host_args->weights, host_args->input, host_args->output); +} + +void cpuinfer_sync(void* host_args_ptr) { + CPUInfer* cpuinfer = (CPUInfer*)host_args_ptr; + cpuinfer->sync(); +} + +PYBIND11_MODULE(cpuinfer_ext, m) { + auto linear_module = m.def_submodule("linear"); + + py::class_(linear_module, "LinearConfig") + .def(py::init([](int hidden_size, int intermediate_size, int stride, intptr_t proj, int proj_type, int hidden_type) { + return LinearConfig(hidden_size, intermediate_size, stride, (void*)proj, (ggml_type)proj_type, (ggml_type)hidden_type); + })); + + py::class_(linear_module, "Linear") + .def(py::init()) + .def("warm_up", [](Linear& linear) { + throw std::runtime_error("!!! Doing nothing, please use CPUInfer.submit to call it!!!\n"); + }) + .def("forward", [](Linear& linear, intptr_t input, intptr_t output) { + throw std::runtime_error("!!! Doing nothing, please use CPUInfer.submit to call it!!!\n"); + }); + + auto mlp_module = m.def_submodule("mlp"); + + py::class_(mlp_module, "MLPConfig") + .def(py::init([](int hidden_size, int intermediate_size, int stride, intptr_t gate_proj, intptr_t up_proj, intptr_t down_proj, int gate_type, int up_type, int down_type, int hidden_type) { + return MLPConfig(hidden_size, intermediate_size, stride, (void*)gate_proj, (void*)up_proj, (void*)down_proj, (ggml_type)gate_type, (ggml_type)up_type, (ggml_type)down_type, (ggml_type)hidden_type); + })); + + py::class_(mlp_module, "MLP") + .def(py::init()) + .def("warm_up", [](MLP& mlp) { + throw std::runtime_error("!!! Doing nothing, please use CPUInfer.submit to call it!!!\n"); + }) + .def("forward", [](MLP& mlp, intptr_t input, intptr_t output) { + throw std::runtime_error("!!! Doing nothing, please use CPUInfer.submit to call it!!!\n"); + }); + + auto moe_module = m.def_submodule("moe"); + + py::class_(moe_module, "MOEConfig") + .def(py::init([](int expert_num, int routed_expert_num, int hidden_size, int intermediate_size, int stride, int group_min_len, int group_max_len, intptr_t gate_proj, intptr_t up_proj, intptr_t down_proj, int gate_type, int up_type, int down_type, int hidden_type) { + return MOEConfig(expert_num, routed_expert_num, hidden_size, intermediate_size, stride, group_min_len, group_max_len, (void*)gate_proj, (void*)up_proj, (void*)down_proj, (ggml_type)gate_type, (ggml_type)up_type, (ggml_type)down_type, (ggml_type)hidden_type); + })); + + py::class_(moe_module, "MOE") + .def(py::init()) + .def("warm_up", [](MOE& moe) { + throw std::runtime_error("!!! Doing nothing, please use CPUInfer.submit to call it!!!\n"); + }) + .def("forward", [](MOE& moe, int k, uint64_t expert_ids, intptr_t weights, intptr_t input, intptr_t output) { + throw std::runtime_error("!!! 
Doing nothing, please use CPUInfer.submit to call it!!!\n"); + }); + + py::class_(m, "CPUInfer") + .def(py::init()) + .def("submit", + [linear_module, mlp_module, moe_module](CPUInfer& cpuinfer, py::object func, py::args args, py::kwargs kwargs) { + if (py::hasattr(func, "__self__") && + py::hasattr(func, "__func__")) { + std::string class_name = py::str(func.attr("__self__") + .attr("__class__") + .attr("__name__")); + if (class_name == "Linear") { + LinearBindings::bind_functions(cpuinfer, func, + args, kwargs); + } else if (class_name == "MLP") { + MLPBindings::bind_functions(cpuinfer, func, + args, kwargs); + } else if (class_name == "MOE") { + MOEBindings::bind_functions(cpuinfer, func, + args, kwargs); + } else { + // handle other classes + throw py::type_error("Unsupported class type: " + + class_name); + } + } else { + // handle cases where func does not have __self__ or + // __func__ + throw py::type_error( + "Invalid function object: missing " + "__self__ or __func__ attribute."); + } + }) + .def("submit_with_cuda_stream", + [linear_module, mlp_module, moe_module](CPUInfer& cpuinfer, intptr_t user_cuda_stream, py::object func, py::args args, py::kwargs kwargs) { + if (py::hasattr(func, "__self__") && + py::hasattr(func, "__func__")) { + std::string class_name = py::str(func.attr("__self__") + .attr("__class__") + .attr("__name__")); + if (class_name == "MOE") { + std::string func_name = py::str(func.attr("__func__").attr("__name__")); + if (func_name == "forward") { + auto moe = func.attr("__self__").cast(); + int qlen = args[0].cast(); + int k = args[1].cast(); + auto expert_ids = args[2].cast(); + auto weights = args[3].cast(); + auto input = args[4].cast(); + auto output = args[5].cast(); + MOEForwardArgs* moe_forward_args = new MOEForwardArgs{&cpuinfer, moe, qlen, k, (uint64_t*)expert_ids, (float*)weights, (void*)input, (void*)output}; + // submit_moe_forward_with_host_args_ptr(moe_forward_args); + cudaLaunchHostFunc((cudaStream_t)user_cuda_stream, (cudaHostFn_t)submit_moe_forward_with_host_args_ptr, moe_forward_args); + } else { + throw py::value_error("Unsupported function: " + + std::string(func_name)); + } + } else { + // handle other classes + throw py::type_error("Unsupported class type: " + + class_name); + } + } else { + // handle cases where func does not have __self__ or + // __func__ + throw py::type_error( + "Invalid function object: missing " + "__self__ or __func__ attribute."); + } + }) + .def("sync_with_cuda_stream", [](CPUInfer& cpuinfer, intptr_t user_cuda_stream) { + // cpuinfer_sync((void*)(&cpuinfer)); + cudaLaunchHostFunc((cudaStream_t)user_cuda_stream, (cudaHostFn_t)cpuinfer_sync, (void*)(&cpuinfer)); + }) + .def("sync", &CPUInfer::sync); +} diff --git a/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/gptq.py b/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/gptq.py new file mode 100644 index 0000000..cda3e7a --- /dev/null +++ b/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/gptq.py @@ -0,0 +1,206 @@ +import math +import os +import time +from logging import getLogger + +import torch +import torch.nn as nn +import transformers + +from .quantizer import Quantizer + + +logger = getLogger(__name__) + +torch.backends.cuda.matmul.allow_tf32 = False +torch.backends.cudnn.allow_tf32 = False + + +class GPTQ: + def __init__(self, layer): + self.layer = layer + self.dev = self.layer.weight.device + W = layer.weight.data.clone() + if isinstance(self.layer, nn.Conv2d): + W = W.flatten(1) + if isinstance(self.layer, 
transformers.pytorch_utils.Conv1D): + W = W.t() + self.rows = W.shape[0] + self.columns = W.shape[1] + self.H = torch.zeros((self.columns, self.columns), device=self.dev) + self.nsamples = 0 + self.quantizer = Quantizer() + + def add_batch(self, inp, out): + if os.environ.get("DEBUG"): + self.inp1 = inp + self.out1 = out + if len(inp.shape) == 2: + inp = inp.unsqueeze(0) + tmp = inp.shape[0] + if isinstance(self.layer, nn.Linear) or isinstance(self.layer, transformers.Conv1D): + if len(inp.shape) == 3: + inp = inp.reshape((-1, inp.shape[-1])) + inp = inp.t() + if isinstance(self.layer, nn.Conv2d): + unfold = nn.Unfold( + self.layer.kernel_size, + dilation=self.layer.dilation, + padding=self.layer.padding, + stride=self.layer.stride, + ) + inp = unfold(inp) + inp = inp.permute([1, 0, 2]) + inp = inp.flatten(1) + self.H *= self.nsamples / (self.nsamples + tmp) + self.nsamples += tmp + # inp = inp.float() + inp = math.sqrt(2 / self.nsamples) * inp.float() + # self.H += 2 / self.nsamples * inp.matmul(inp.t()) + self.H += inp.matmul(inp.t()) + + def fasterquant( + self, + blocksize=128, + percdamp=0.01, + group_size=-1, + actorder=False, + static_groups=False, + ): + W = self.layer.weight.data.clone() + if isinstance(self.layer, nn.Conv2d): + W = W.flatten(1) + if isinstance(self.layer, transformers.Conv1D): + W = W.t() + W = W.float() + + tick = time.time() + + if not self.quantizer.ready(): + self.quantizer.find_params(W, weight=True) + + H = self.H + del self.H + dead = torch.diag(H) == 0 + H[dead, dead] = 1 + W[:, dead] = 0 + + g_idx = [] + scale = [] + zero = [] + now_idx = 1 + + if static_groups: + import copy + + groups = [] + for i in range(0, self.columns, group_size): + quantizer = copy.deepcopy(self.quantizer) + quantizer.find_params(W[:, i : (i + group_size)], weight=True) + scale.append(quantizer.scale) + zero.append(quantizer.zero) + groups.append(quantizer) + + if actorder: + perm = torch.argsort(torch.diag(H), descending=True) + W = W[:, perm] + H = H[perm][:, perm] + invperm = torch.argsort(perm) + + Losses = torch.zeros_like(W) + Q = torch.zeros_like(W) + + damp = percdamp * torch.mean(torch.diag(H)) + diag = torch.arange(self.columns, device=self.dev) + H[diag, diag] += damp + H = torch.linalg.cholesky(H) + H = torch.cholesky_inverse(H) + H = torch.linalg.cholesky(H, upper=True) + Hinv = H + + for i1 in range(0, self.columns, blocksize): + i2 = min(i1 + blocksize, self.columns) + count = i2 - i1 + + W1 = W[:, i1:i2].clone() + Q1 = torch.zeros_like(W1) + Err1 = torch.zeros_like(W1) + Losses1 = torch.zeros_like(W1) + Hinv1 = Hinv[i1:i2, i1:i2] + + for i in range(count): + w = W1[:, i] + d = Hinv1[i, i] + + if group_size != -1: + if not static_groups: + if (i1 + i) % group_size == 0: + self.quantizer.find_params(W[:, (i1 + i) : (i1 + i + group_size)], weight=True) + + if ((i1 + i) // group_size) - now_idx == -1: + scale.append(self.quantizer.scale) + zero.append(self.quantizer.zero) + now_idx += 1 + else: + idx = i1 + i + if actorder: + idx = perm[idx] + self.quantizer = groups[idx // group_size] + + q = self.quantizer.quantize(w.unsqueeze(1)).flatten() + Q1[:, i] = q + Losses1[:, i] = (w - q) ** 2 / d**2 + + err1 = (w - q) / d + W1[:, i:] -= err1.unsqueeze(1).matmul(Hinv1[i, i:].unsqueeze(0)) + Err1[:, i] = err1 + + Q[:, i1:i2] = Q1 + Losses[:, i1:i2] = Losses1 / 2 + + W[:, i2:] -= Err1.matmul(Hinv[i1:i2, i2:]) + + if os.environ.get("DEBUG"): + self.layer.weight.data[:, :i2] = Q[:, :i2] + self.layer.weight.data[:, i2:] = W[:, i2:] + logger.debug(torch.sum((self.layer(self.inp1) 
- self.out1) ** 2)) + logger.debug(torch.sum(Losses)) + + torch.cuda.synchronize() + logger.info(f"duration: {(time.time() - tick)}") + logger.info(f"avg loss: {torch.sum(Losses).item() / self.nsamples}") + + group_size = group_size if group_size != -1 else self.columns + if static_groups and actorder: + g_idx = [perm[i] // group_size for i in range(self.columns)] + else: + g_idx = [i // group_size for i in range(self.columns)] + g_idx = torch.tensor(g_idx, dtype=torch.int32, device=Q.device) + if actorder: + Q = Q[:, invperm] + g_idx = g_idx[invperm] + + if isinstance(self.layer, transformers.Conv1D): + Q = Q.t() + self.layer.weight.data = Q.reshape(self.layer.weight.shape).type_as(self.layer.weight.data) + if os.environ.get("DEBUG"): + logger.debug(torch.sum((self.layer(self.inp1) - self.out1) ** 2)) + + if scale == []: + scale.append(self.quantizer.scale) + zero.append(self.quantizer.zero) + scale = torch.cat(scale, dim=1) + zero = torch.cat(zero, dim=1) + return scale, zero, g_idx + + def free(self): + if os.environ.get("DEBUG"): + self.inp1 = None + self.out1 = None + self.H = None + self.Losses = None + self.Trace = None + torch.cuda.empty_cache() + + +__all__ = ["GPTQ"] diff --git a/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/gptq_marlin.py b/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/gptq_marlin.py new file mode 100644 index 0000000..599070f --- /dev/null +++ b/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/gptq_marlin.py @@ -0,0 +1,458 @@ +import enum +from enum import Enum +from typing import Any, Dict, List, Optional + +import torch +from torch.nn.parameter import Parameter + +from vllm import _custom_ops as ops +from vllm.logger import init_logger +from vllm.model_executor.layers.linear import (LinearBase, LinearMethodBase, + set_weight_attrs) +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig) + +logger = init_logger(__name__) + +GPTQ_MARLIN_TILE = 16 +GPTQ_MARLIN_MIN_THREAD_N = 64 +GPTQ_MARLIN_MIN_THREAD_K = 128 +GPTQ_MARLIN_MAX_PARALLEL = 16 + +GPTQ_MARLIN_SUPPORTED_NUM_BITS = [4, 8] +GPTQ_MARLIN_SUPPORTED_GROUP_SIZES = [-1, 32, 64, 128] +GPTQ_MARLIN_SUPPORTED_SYM = [True] + + +# Permutations for Marlin scale shuffling +def get_scale_perms(num_bits: int): + scale_perm: List[int] = [] + for i in range(8): + scale_perm.extend([i + 8 * j for j in range(8)]) + scale_perm_single: List[int] = [] + for i in range(4): + scale_perm_single.extend( + [2 * i + j for j in [0, 1, 8, 9, 16, 17, 24, 25]]) + return scale_perm, scale_perm_single + + +def get_pack_factor(num_bits: int): + assert (num_bits in GPTQ_MARLIN_SUPPORTED_NUM_BITS + ), f"Unsupported num_bits = {num_bits}" + return 32 // num_bits + + +def marlin_permute_scales(s: torch.Tensor, size_k: int, size_n: int, + group_size: int, num_bits: int): + scale_perm, scale_perm_single = get_scale_perms(num_bits) + if group_size < size_k and group_size != -1: + s = s.reshape((-1, len(scale_perm)))[:, scale_perm] + else: + s = s.reshape((-1, len(scale_perm_single)))[:, scale_perm_single] + s = s.reshape((-1, size_n)).contiguous() + + return s + + +class GPTQMarlinConfig(QuantizationConfig): + """Config class for GPTQ Marlin""" + + def __init__(self, weight_bits: int, group_size: int, desc_act: bool, + is_sym: bool) -> None: + if desc_act and group_size == -1: + # In this case, act_order == True is the same as act_order == False + # (since we have only one group per output channel) + desc_act = False + + self.weight_bits = weight_bits + 
self.group_size = group_size + self.desc_act = desc_act + self.is_sym = is_sym + + # Verify + if self.weight_bits not in GPTQ_MARLIN_SUPPORTED_NUM_BITS: + raise ValueError( + f"Marlin does not support weight_bits = {self.weight_bits}. " + f"Only weight_bits = {GPTQ_MARLIN_SUPPORTED_NUM_BITS} " + "are supported.") + if self.group_size not in GPTQ_MARLIN_SUPPORTED_GROUP_SIZES: + raise ValueError( + f"Marlin does not support group_size = {self.group_size}. " + f"Only group_sizes = {GPTQ_MARLIN_SUPPORTED_GROUP_SIZES} " + "are supported.") + if self.is_sym not in GPTQ_MARLIN_SUPPORTED_SYM: + raise ValueError( + f"Marlin does not support is_sym = {self.is_sym}. " + f"Only sym = {GPTQ_MARLIN_SUPPORTED_SYM} are supported.") + + # Init + self.pack_factor = get_pack_factor(weight_bits) + self.tile_size = GPTQ_MARLIN_TILE + self.min_thread_n = GPTQ_MARLIN_MIN_THREAD_N + self.min_thread_k = GPTQ_MARLIN_MIN_THREAD_K + self.max_parallel = GPTQ_MARLIN_MAX_PARALLEL + + def __repr__(self) -> str: + return (f"GPTQMarlinConfig(weight_bits={self.weight_bits}, " + f"group_size={self.group_size}, " + f"desc_act={self.desc_act})") + + @classmethod + def get_name(cls) -> str: + return "gptq_marlin" + + @classmethod + def get_supported_act_dtypes(cls) -> List[torch.dtype]: + return [torch.half, torch.bfloat16] + + @classmethod + def get_min_capability(cls) -> int: + return 80 + + @classmethod + def get_config_filenames(cls) -> List[str]: + return ["quantize_config.json"] + + @classmethod + def from_config(cls, config: Dict[str, Any]) -> "GPTQMarlinConfig": + weight_bits = cls.get_from_keys(config, ["bits"]) + group_size = cls.get_from_keys(config, ["group_size"]) + desc_act = cls.get_from_keys(config, ["desc_act"]) + is_sym = cls.get_from_keys(config, ["sym"]) + return cls(weight_bits, group_size, desc_act, is_sym) + + @classmethod + def override_quantization_method(cls, hf_quant_cfg, + user_quant) -> Optional[str]: + can_convert = cls.is_marlin_compatible(hf_quant_cfg) + + is_valid_user_quant = (user_quant is None or user_quant == "marlin") + + if can_convert and is_valid_user_quant: + msg = ("The model is convertible to {} during runtime." + " Using {} kernel.".format(cls.get_name(), cls.get_name())) + logger.info(msg) + return cls.get_name() + + if can_convert and user_quant == "gptq": + logger.info("Detected that the model can run with gptq_marlin" + ", however you specified quantization=gptq explicitly," + " so forcing gptq. Use quantization=gptq_marlin for" + " faster inference") + return None + + def get_quant_method( + self, + layer: torch.nn.Module) -> Optional["GPTQMarlinLinearMethod"]: + if isinstance(layer, LinearBase): + return GPTQMarlinLinearMethod(self) + return None + + def get_scaled_act_names(self) -> List[str]: + return [] + + @classmethod + def is_marlin_compatible(cls, quant_config: Dict[str, Any]): + # Extract data from quant config. + num_bits = quant_config.get("bits", None) + group_size = quant_config.get("group_size", None) + sym = quant_config.get("sym", None) + desc_act = quant_config.get("desc_act", None) + + # If we cannot find the info needed in the config, cannot convert. + if (num_bits is None or group_size is None or sym is None + or desc_act is None): + return False + + # If the capability of the device is too low, cannot convert. + major, minor = torch.cuda.get_device_capability() + device_capability = major * 10 + minor + if device_capability < cls.get_min_capability(): + return False + + # Otherwise, can convert if model satisfies marlin constraints. 
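+        # i.e. 4- or 8-bit weights, a supported group size, and symmetric
+        # quantization (sym=True is the only setting in
+        # GPTQ_MARLIN_SUPPORTED_SYM above).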
+ return (num_bits in GPTQ_MARLIN_SUPPORTED_NUM_BITS + and group_size in GPTQ_MARLIN_SUPPORTED_GROUP_SIZES + and sym in GPTQ_MARLIN_SUPPORTED_SYM) + + +class GPTQMarlinState(Enum): + REPACK = enum.auto() + READY = enum.auto() + + +class GPTQMarlinLinearMethod(LinearMethodBase): + """Linear method for GPTQ Marlin. + + Args: + quant_config: The GPTQ Marlin quantization config. + """ + + def __init__(self, quant_config: GPTQMarlinConfig) -> None: + self.quant_config = quant_config + + def create_weights( + self, + layer: torch.nn.Module, + input_size_per_partition: int, + output_partition_sizes: List[int], + input_size: int, + output_size: int, + params_dtype: torch.dtype, + **extra_weight_attrs, + ) -> None: + del output_size + + # Normalize group_size + if self.quant_config.group_size != -1: + group_size = self.quant_config.group_size + else: + group_size = input_size + + # Validate dtype + if params_dtype not in [torch.float16, torch.bfloat16]: + raise ValueError(f"The params dtype must be float16 " + f"or bfloat16, but got {params_dtype}") + + # Validate output_size_per_partition + output_size_per_partition = sum(output_partition_sizes) + if output_size_per_partition % self.quant_config.min_thread_n != 0: + raise ValueError( + f"Weight output_size_per_partition = " + f"{output_size_per_partition} is not divisible by " + f" min_thread_n = {self.quant_config.min_thread_n}.") + + # Validate input_size_per_partition + if input_size_per_partition % self.quant_config.min_thread_k != 0: + raise ValueError( + f"Weight input_size_per_partition = " + f"{input_size_per_partition} is not divisible " + f"by min_thread_k = {self.quant_config.min_thread_k}.") + + if (group_size < input_size + and input_size_per_partition % group_size != 0): + raise ValueError( + f"Weight input_size_per_partition = {input_size_per_partition}" + f" is not divisible by group_size = {group_size}.") + + # Detect sharding of scales/zp + + # By default, no sharding over "input dim" + scales_and_zp_size = input_size // group_size + scales_and_zp_input_dim = None + + if self.quant_config.desc_act: + # Act-order case + assert self.quant_config.group_size != -1 + + is_k_full = input_size_per_partition == input_size + + else: + # No act-order case + + # K is always full due to full alignment with + # group-size and shard of scales/zp + is_k_full = True + + # If this is a row-parallel case, then shard scales/zp + if (input_size != input_size_per_partition + and self.quant_config.group_size != -1): + scales_and_zp_size = input_size_per_partition // group_size + scales_and_zp_input_dim = 0 + + # Init buffers + + # Quantized weights + qweight = Parameter( + torch.empty( + input_size_per_partition // self.quant_config.pack_factor, + output_size_per_partition, + dtype=torch.int32, + ), + requires_grad=False, + ) + set_weight_attrs( + qweight, + { + **extra_weight_attrs, + "input_dim": 0, + "output_dim": 1, + "packed_dim": 0, + "pack_factor": self.quant_config.pack_factor, + }, + ) + + # Activation order + g_idx = Parameter( + torch.empty( + input_size_per_partition, + dtype=torch.int32, + ), + requires_grad=False, + ) + # Ignore warning from fused linear layers such as QKVParallelLinear. 
+ set_weight_attrs( + g_idx, + { + **extra_weight_attrs, "input_dim": 0, + "ignore_warning": True + }, + ) + + g_idx_sort_indices = torch.empty( + g_idx.shape, + dtype=torch.int32, + ) + + # Scales + scales = Parameter( + torch.empty( + scales_and_zp_size, + output_size_per_partition, + dtype=params_dtype, + ), + requires_grad=False, + ) + set_weight_attrs( + scales, + { + **extra_weight_attrs, + "input_dim": scales_and_zp_input_dim, + "output_dim": 1, + }, + ) + + # Quantized zero-points + qzeros = Parameter( + torch.empty( + scales_and_zp_size, + output_size_per_partition // self.quant_config.pack_factor, + dtype=torch.int32, + device="meta", + ), + requires_grad=False, + ) + set_weight_attrs( + qzeros, + { + **extra_weight_attrs, + "input_dim": scales_and_zp_input_dim, + "output_dim": 1, + "packed_dim": 1, + "pack_factor": self.quant_config.pack_factor, + }, + ) + + # Allocate marlin workspace + max_workspace_size = ( + output_size_per_partition // + self.quant_config.min_thread_n) * self.quant_config.max_parallel + workspace = torch.zeros(max_workspace_size, + dtype=torch.int, + requires_grad=False) + + layer.register_parameter("qweight", qweight) + layer.register_parameter("g_idx", g_idx) + layer.register_parameter("scales", scales) + layer.register_parameter("qzeros", qzeros) + layer.g_idx_sort_indices = g_idx_sort_indices + layer.workspace = workspace + layer.input_size_per_partition = input_size_per_partition + layer.output_size_per_partition = output_size_per_partition + layer.input_size = input_size + layer.is_k_full = is_k_full + layer.marlin_state = GPTQMarlinState.REPACK + + def apply( + self, + layer: torch.nn.Module, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + reshaped_x = x.reshape(-1, x.shape[-1]) + + size_m = reshaped_x.shape[0] + part_size_n = layer.output_size_per_partition + part_size_k = layer.input_size_per_partition + full_size_k = layer.input_size + + out_shape = x.shape[:-1] + (part_size_n, ) + + if layer.marlin_state == GPTQMarlinState.REPACK: + layer.marlin_state = GPTQMarlinState.READY + + # Newly generated tensors need to replace existing tensors that are + # already registered as parameters by vLLM (and won't be freed) + def replace_tensor(name, new_t): + # It is important to use resize_() here since it ensures + # the same buffer is reused + getattr(layer, name).resize_(new_t.shape) + getattr(layer, name).copy_(new_t) + del new_t + + cur_device = layer.qweight.device + + # Process act_order + if self.quant_config.desc_act: + # Get sorting based on g_idx + g_idx_sort_indices = torch.argsort(layer.g_idx).to(torch.int) + + sorted_g_idx = layer.g_idx[g_idx_sort_indices] + + replace_tensor("g_idx", sorted_g_idx) + replace_tensor("g_idx_sort_indices", g_idx_sort_indices) + + else: + # Reset g_idx related tensors + layer.g_idx = Parameter( + torch.empty(0, dtype=torch.int, device=cur_device), + requires_grad=False, + ) + layer.g_idx_sort_indices = Parameter( + torch.empty(0, dtype=torch.int, device=cur_device), + requires_grad=False, + ) + + # Repack weights + marlin_qweight = ops.gptq_marlin_repack( + layer.qweight, + layer.g_idx_sort_indices, + part_size_k, + part_size_n, + self.quant_config.weight_bits, + ) + replace_tensor("qweight", marlin_qweight) + + # Permute scales + scales_size_k = part_size_k + scales_size_n = part_size_n + if self.quant_config.desc_act: + scales_size_k = full_size_k + + marlin_scales = marlin_permute_scales( + layer.scales, + scales_size_k, + scales_size_n, + self.quant_config.group_size, + 
self.quant_config.weight_bits, + ) + replace_tensor("scales", marlin_scales) + + output = ops.gptq_marlin_gemm( + reshaped_x, + layer.qweight, + layer.scales, + layer.g_idx, + layer.g_idx_sort_indices, + layer.workspace, + self.quant_config.weight_bits, + size_m, + part_size_n, + part_size_k, + layer.is_k_full, + ) + + if bias is not None: + output.add_(bias) # In-place add + + return output.reshape(out_shape) diff --git a/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/quantizer.py b/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/quantizer.py new file mode 100644 index 0000000..e945a70 --- /dev/null +++ b/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/quantizer.py @@ -0,0 +1,140 @@ +from logging import getLogger + +import torch +import torch.nn as nn + + +logger = getLogger(__name__) + + +def quantize(x, scale, zero, maxq): + if maxq < 0: + return (x > scale / 2).float() * scale + (x < zero / 2).float() * zero + q = torch.clamp(torch.round(x / scale) + zero, 0, maxq) + return scale * (q - zero) + + +class Quantizer(nn.Module): + def __init__(self, shape=1): + super(Quantizer, self).__init__() + self.register_buffer("maxq", torch.tensor(0)) + self.register_buffer("scale", torch.zeros(shape)) + self.register_buffer("zero", torch.zeros(shape)) + + def configure( + self, + bits, + perchannel=False, + sym=True, + mse=False, + norm=2.4, + grid=100, + maxshrink=0.8, + trits=False, + ): + self.maxq = torch.tensor(2**bits - 1) + self.perchannel = perchannel + self.sym = sym + self.mse = mse + self.norm = norm + self.grid = grid + self.maxshrink = maxshrink + if trits: + self.maxq = torch.tensor(-1) + + def find_params(self, x, weight=False): + dev = x.device + self.maxq = self.maxq.to(dev) + + shape = x.shape + if self.perchannel: + if weight: + x = x.flatten(1) + else: + if len(shape) == 4: + x = x.permute([1, 0, 2, 3]) + x = x.flatten(1) + if len(shape) == 3: + x = x.reshape((-1, shape[-1])).t() + if len(shape) == 2: + x = x.t() + else: + x = x.flatten().unsqueeze(0) + + tmp = torch.zeros(x.shape[0], device=dev) + xmin = torch.minimum(x.min(1)[0], tmp) + xmax = torch.maximum(x.max(1)[0], tmp) + + if self.sym: + xmax = torch.maximum(torch.abs(xmin), xmax) + tmp = xmin < 0 + if torch.any(tmp): + xmin[tmp] = -xmax[tmp] + tmp = (xmin == 0) & (xmax == 0) + xmin[tmp] = -1 + xmax[tmp] = +1 + + if self.maxq < 0: + self.scale = xmax + self.zero = xmin + else: + self.scale = (xmax - xmin) / self.maxq + if self.sym: + self.zero = torch.full_like(self.scale, (self.maxq + 1) / 2) + else: + self.zero = torch.round(-xmin / self.scale) + + if self.mse: + best = torch.full([x.shape[0]], float("inf"), device=dev) + for i in range(int(self.maxshrink * self.grid)): + p = 1 - i / self.grid + xmin1 = p * xmin + xmax1 = p * xmax + scale1 = (xmax1 - xmin1) / self.maxq + zero1 = torch.round(-xmin1 / scale1) if not self.sym else self.zero + q = quantize(x, scale1.unsqueeze(1), zero1.unsqueeze(1), self.maxq) + q -= x + q.abs_() + q.pow_(self.norm) + err = torch.sum(q, 1) + tmp = err < best + if torch.any(tmp): + best[tmp] = err[tmp] + self.scale[tmp] = scale1[tmp] + self.zero[tmp] = zero1[tmp] + if not self.perchannel: + if weight: + tmp = shape[0] + else: + tmp = shape[1] if len(shape) != 3 else shape[2] + self.scale = self.scale.repeat(tmp) + self.zero = self.zero.repeat(tmp) + + if weight: + shape = [-1] + [1] * (len(shape) - 1) + self.scale = self.scale.reshape(shape) + self.zero = self.zero.reshape(shape) + return + if len(shape) == 4: + self.scale = 
self.scale.reshape((1, -1, 1, 1)) + self.zero = self.zero.reshape((1, -1, 1, 1)) + if len(shape) == 3: + self.scale = self.scale.reshape((1, 1, -1)) + self.zero = self.zero.reshape((1, 1, -1)) + if len(shape) == 2: + self.scale = self.scale.unsqueeze(0) + self.zero = self.zero.unsqueeze(0) + + def quantize(self, x): + if self.ready(): + return quantize(x, self.scale, self.zero, self.maxq) + return x + + def enabled(self): + return self.maxq > 0 + + def ready(self): + return torch.all(self.scale != 0) + + +__all__ = ["Quantizer"] diff --git a/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/repack.py b/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/repack.py new file mode 100644 index 0000000..987f05b --- /dev/null +++ b/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/repack.py @@ -0,0 +1,99 @@ +import torch +import enum +from enum import Enum +from typing import Any, Dict, List, Optional +from torch.nn.parameter import Parameter + +def apply( + self, + layer: torch.nn.Module, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None, +) -> torch.Tensor: + reshaped_x = x.reshape(-1, x.shape[-1]) + + size_m = reshaped_x.shape[0] + part_size_n = layer.output_size_per_partition + part_size_k = layer.input_size_per_partition + full_size_k = layer.input_size + + out_shape = x.shape[:-1] + (part_size_n, ) + + if layer.marlin_state == GPTQMarlinState.REPACK: + layer.marlin_state = GPTQMarlinState.READY + + # Newly generated tensors need to replace existing tensors that are + # already registered as parameters by vLLM (and won't be freed) + def replace_tensor(name, new_t): + # It is important to use resize_() here since it ensures + # the same buffer is reused + getattr(layer, name).resize_(new_t.shape) + getattr(layer, name).copy_(new_t) + del new_t + + cur_device = layer.qweight.device + + # Process act_order + if self.quant_config.desc_act: + # Get sorting based on g_idx + g_idx_sort_indices = torch.argsort(layer.g_idx).to(torch.int) + + sorted_g_idx = layer.g_idx[g_idx_sort_indices] + + replace_tensor("g_idx", sorted_g_idx) + replace_tensor("g_idx_sort_indices", g_idx_sort_indices) + + else: + # Reset g_idx related tensors + layer.g_idx = Parameter( + torch.empty(0, dtype=torch.int, device=cur_device), + requires_grad=False, + ) + layer.g_idx_sort_indices = Parameter( + torch.empty(0, dtype=torch.int, device=cur_device), + requires_grad=False, + ) + + # Repack weights + marlin_qweight = ops.gptq_marlin_repack( + layer.qweight, + layer.g_idx_sort_indices, + part_size_k, + part_size_n, + self.quant_config.weight_bits, + ) + replace_tensor("qweight", marlin_qweight) + + # Permute scales + scales_size_k = part_size_k + scales_size_n = part_size_n + if self.quant_config.desc_act: + scales_size_k = full_size_k + + marlin_scales = marlin_permute_scales( + layer.scales, + scales_size_k, + scales_size_n, + self.quant_config.group_size, + self.quant_config.weight_bits, + ) + replace_tensor("scales", marlin_scales) + + output = ops.gptq_marlin_gemm( + reshaped_x, + layer.qweight, + layer.scales, + layer.g_idx, + layer.g_idx_sort_indices, + layer.workspace, + self.quant_config.weight_bits, + size_m, + part_size_n, + part_size_k, + layer.is_k_full, + ) + + if bias is not None: + output.add_(bias) # In-place add + + return output.reshape(out_shape) diff --git a/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/utils/__init__.py b/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/utils/__init__.py new file mode 100644 index 
0000000..e69de29 diff --git a/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/utils/format_24.py b/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/utils/format_24.py new file mode 100644 index 0000000..01c8cf7 --- /dev/null +++ b/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/utils/format_24.py @@ -0,0 +1,308 @@ +# +# Modified by Roberto Lopez Castro (roberto.lopez.castro@udc.es). +# + +import torch + + +# This is PyTorch implementation of main part of reorder_meta() +# function, from tools/util/include/cutlass/util/host_reorder.h file +# of CUTLASS source tree. Furthermore, CUTLASS template for sparse +# GEMM decides upon layout of this matrix, and at the moment for the +# sparse GEMM executed on tensor cores, this is layout described by +# ColumnMajorInterleaved<2> data structure, in +# include/cutlass/layout/matrix.h of CUTLASS source tree. The +# reordering of meta matrix into meta_reordered matrix calculated +# according to these segments of CUTLASS code is re-implemented here. +# Note that this calculation produces offsets for scattering metadata +# matrix elements into reordered metadata matrix elements (or, +# equivalently, for gathering reordered metadata matrix element back +# into metadata matrix elements). +def _calculate_meta_reordering_scatter_offsets(m, meta_ncols, meta_dtype, + device): + dst_rows = torch.arange(0, m, device=device)[:, None].repeat(1, meta_ncols) + dst_cols = torch.arange(0, meta_ncols, device=device).repeat(m, 1) + + # Reorder the rows, then swizzle the 2x2 blocks. + group_x = 64 + group_y = 32 if meta_dtype.itemsize == 2 else 16 + + dst_rows = (dst_rows // group_x * group_x + (dst_rows % 2) * 2 + + (dst_rows % 8) // 4 + ((dst_rows % group_y) % 4) // 2 * 32 + + ((dst_rows % group_x) // 8) * 4) + + topright = ((dst_rows % 2 == 0) & (dst_cols % 2 == 1)).to(torch.int8) + bottomleft = ((dst_rows % 2 == 1) & (dst_cols % 2 == 0)).to(torch.int8) + dst_rows += topright - bottomleft + dst_cols -= topright - bottomleft + + # Assumed that meta tensor is to be stored in CUTLASS + # InterleavedColumnMajor layout, and reverse engineered + # corresponding code to store values into this tensor. + interleave = 2 + cols_maj = dst_cols // interleave + cols_min = dst_cols % interleave + return (cols_maj * m * interleave + dst_rows * interleave + + cols_min).view(-1) + + +# This function converts dense matrix into sparse semi-structured +# representation, producing "compressed" matrix, in the layout used by +# CUTLASS backend, and corresponding metadata matrix. 
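+# A hedged usage sketch (shapes are illustrative assumptions, not from this +# repo): for a 64x128 fp16 tensor already in 2:4 form, the function below +# returns a 64x64 tensor of the kept values plus an int16 metadata tensor +# recording their positions: +# +# dense = torch.randn(64, 128, dtype=torch.half, device="cuda") +# dense = dense * mask_creator(dense) # mask_creator is defined below +# sparse, meta = sparse_semi_structured_from_dense_cutlass(dense) +# assert sparse.shape == (64, 64) and meta.dtype == torch.int16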
+def sparse_semi_structured_from_dense_cutlass(dense): + if dense.dim() != 2: + raise RuntimeError( + f"Expected 2-dimensional dense tensor, got {dense.dim()}-dimensional tensor" # noqa: E501 + ) + + m, k = dense.shape + device = dense.device + + meta_dtype = torch.int8 + if dense.dtype == torch.int8: + meta_dtype = torch.int32 + elif dense.dtype in [torch.half, torch.bfloat16, torch.float, torch.int32]: + meta_dtype = torch.int16 + else: + raise RuntimeError(f"Invalid datatype {dense.dtype} of dense matrix") + quadbits_per_meta_elem = meta_dtype.itemsize * 8 // 4 + if quadbits_per_meta_elem not in (4, 8): + raise RuntimeError( + "Invalid number of elements per meta element calculated") + + if meta_dtype == torch.int32: + if m % 16 != 0: + raise RuntimeError( + f"Number of rows of dense matrix {m} must be divisible by 16") + else: + if m % 32 != 0: + raise RuntimeError( + f"Number of rows of dense matrix {m} must be divisible by 32") + if k % (4 * quadbits_per_meta_elem) != 0: + raise RuntimeError( + f"Number of columns of dense matrix {k} must be divisible by {4 * quadbits_per_meta_elem}" # noqa: E501 + ) + + if dense.dtype != torch.float: + ksparse = 4 + dense_4 = dense.view(-1, k // ksparse, ksparse) + m0, m1, m2, m3 = (dense_4 != 0).unbind(-1) + else: + ksparse = 2 + dense_2 = dense.view(-1, k // ksparse, ksparse) + m0, m2 = m1, m3 = (dense_2 != 0).unbind(-1) + meta_ncols = k // (ksparse * quadbits_per_meta_elem) + + # Encoding quadruples of True/False values as follows: + # [True, True, False, False] -> 0b0100 + # [True, False, True, False] -> 0b1000 + # [False, True, True, False] -> 0b1001 + # [True, False, False, True ] -> 0b1100 + # [False, True, False, True ] -> 0b1101 + # [False, False, True, True ] -> 0b1110 + # Thus, lower two bits in the encoding are index of the True value + # at the lowest index in the quadruple, and the higher two bits in + # the encoding are index of the other True value in the quadruple. + # In case there are less than two True values, than False value or + # values at some index or indices are considered True for the + # encoding. In case there are more than two True values, then the + # excess True value(s) at some indices are considered False for + # the encoding. The exact encodings used for these cases are as + # follows: + # [False, False, False, False] -> 0b1110 + # [False, False, False, True ] -> 0b1110 + # [False, False, True, False] -> 0b1110 + # [False, True, False, False] -> 0b1001 + # [False, True, True, True ] -> 0b1101 + # [True, False, False, False] -> 0b1000 + # [True, False, True, True ] -> 0b1100 + # [True, True, False, True ] -> 0b0100 + # [True, True, True, False] -> 0b0100 + # [True, True, True, True ] -> 0b0100 + # These particular encodings are chosen, with the help of Espresso + # logic minimizer software, for the purpose of minimization of + # corresponding Boolean functions, that translate non-zero flags + # into encoding bits. Note also possible choices for the first + # and last of these encodings were limited only to (0b0100, + # 0b1110), in order to produce valid encodings for 1:2 sparsity + # case. 
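+ # Worked example (a restatement of the table above, not new logic): the + # quadruple [True, False, False, True] keeps elements 0 and 3, so the low + # two bits encode index 0 and the high two bits index 3, giving 0b1100. + # idxs0/idxs1 below recover exactly those two kept indices per quadruple, + # and meta_4 later packs them as idxs0 | (idxs1 << 2).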
+ + expr0 = m0 & m1 + expr1 = ~m0 & m1 + expr2 = ~m0 & ~m1 + bit0 = expr1 + bit1 = expr2 + bit2 = expr0 | expr2 | m3 + bit3 = expr1 | ~m1 + idxs0 = bit0 | (bit1.to(torch.int64) << 1) + idxs1 = bit2 | (bit3.to(torch.int64) << 1) + + if dense.dtype != torch.float: + sparse0 = dense_4.gather( + -1, idxs0.unsqueeze(-1)) # type: ignore[possibly-undefined] + sparse1 = dense_4.gather(-1, idxs1.unsqueeze(-1)) + sparse = torch.stack((sparse0, sparse1), dim=-1).view(m, k // 2) + else: + sparse = dense_2.gather(-1, + idxs0.unsqueeze(-1) // 2).view( + m, + k // 2) # type: ignore[possibly-undefined] + + meta_4 = idxs0 | (idxs1 << 2) + meta_n = meta_4.view( + (-1, meta_ncols, quadbits_per_meta_elem)).to(meta_dtype) + + if quadbits_per_meta_elem == 4: + meta = (meta_n[:, :, 0] + | (meta_n[:, :, 1] << 4) + | (meta_n[:, :, 2] << 8) + | (meta_n[:, :, 3] << 12)) + elif quadbits_per_meta_elem == 8: + meta = (meta_n[:, :, 0] + | (meta_n[:, :, 1] << 4) + | (meta_n[:, :, 2] << 8) + | (meta_n[:, :, 3] << 12) + | (meta_n[:, :, 4] << 16) + | (meta_n[:, :, 5] << 20) + | (meta_n[:, :, 6] << 24) + | (meta_n[:, :, 7] << 28)) + + # Reorder meta tensor elements. + meta_reordered = meta.new_empty( + (m * meta_ncols, )) # type: ignore[possibly-undefined] + meta_offsets = _calculate_meta_reordering_scatter_offsets( + m, meta_ncols, meta_dtype, device) + meta_reordered.scatter_(0, meta_offsets, meta.view(-1)) + + return (sparse, meta_reordered.view(m, meta_ncols)) + + +# This function performs reverse of the function above - it +# reconstructs dense matrix from a pair of "compressed" matrix, given +# in the layout used by CUTLASS backend, and accompanying metadata +# matrix. +def sparse_semi_structured_to_dense_cutlass(sparse, meta_reordered): + if sparse.dim() != 2: + raise RuntimeError( + f"Expected 2-dimensional sparse tensor, got {sparse.dim()}-dimensional tensor" # noqa: E501 + ) + + m, k = sparse.shape + device = sparse.device + + if meta_reordered.dim() != 2: + raise RuntimeError( + f"Expected 2-dimensional meta tensor, got {meta_reordered.dim()}-dimensional tensor" # noqa: E501 + ) + if meta_reordered.device != device: + raise RuntimeError( + f"Expected meta matrix to be on {device} device, got matrix on {meta_reordered.device} device" # noqa: E501 + ) + + meta_dtype = meta_reordered.dtype + if meta_dtype not in (torch.int16, torch.int32): + raise RuntimeError(f"Invalid datatype {meta_dtype} of meta matrix") + quadbits_per_meta_elem = meta_dtype.itemsize * 8 // 4 + + ksparse = 4 if sparse.dtype != torch.float else 2 + + meta_nrows, meta_ncols = meta_reordered.shape + if meta_nrows != m: + raise RuntimeError( + f"Number of rows of meta matrix {meta_nrows} must be equal to number of columns of spase matrix {m}" # noqa: E501 + ) + if meta_ncols * ksparse * quadbits_per_meta_elem != 2 * k: + raise RuntimeError( + f"Number of columns of sparse matrix {k} different from the {meta_ncols * ksparse * quadbits_per_meta_elem // 2}, " # noqa: E501 + "expected according to the number of columns of meta matrix") + + # Undo meta tensor elements reordering. + meta_offsets = _calculate_meta_reordering_scatter_offsets( + m, meta_ncols, meta_dtype, device) + meta = torch.gather(meta_reordered.view(-1), 0, + meta_offsets).view(m, meta_ncols) + + # Unpack sparse tensor back to original dense tensor, using + # information provided by meta tensor. 
Note that torch.float + # datatype is handled pretty much the same as + # torch.half/torch.bfloat16, as metadata for a pair of torch.float + # value is encoded as if underlying 8 bytes contain four + # torch.half/torch.bfloat16 values, where either first two or last + # two are zeros. + meta_2 = torch.empty( + (m, meta_ncols, 2 * quadbits_per_meta_elem), + dtype=meta_dtype, + device=device, + ) + if quadbits_per_meta_elem == 4: + meta_2[:, :, 0] = meta & 0b11 + meta_2[:, :, 1] = (meta >> 2) & 0b11 + meta_2[:, :, 2] = (meta >> 4) & 0b11 + meta_2[:, :, 3] = (meta >> 6) & 0b11 + meta_2[:, :, 4] = (meta >> 8) & 0b11 + meta_2[:, :, 5] = (meta >> 10) & 0b11 + meta_2[:, :, 6] = (meta >> 12) & 0b11 + meta_2[:, :, 7] = (meta >> 14) & 0b11 + elif quadbits_per_meta_elem == 8: + meta_2[:, :, 0] = meta & 0b11 + meta_2[:, :, 1] = (meta >> 2) & 0b11 + meta_2[:, :, 2] = (meta >> 4) & 0b11 + meta_2[:, :, 3] = (meta >> 6) & 0b11 + meta_2[:, :, 4] = (meta >> 8) & 0b11 + meta_2[:, :, 5] = (meta >> 10) & 0b11 + meta_2[:, :, 6] = (meta >> 12) & 0b11 + meta_2[:, :, 7] = (meta >> 14) & 0b11 + meta_2[:, :, 8] = (meta >> 16) & 0b11 + meta_2[:, :, 9] = (meta >> 18) & 0b11 + meta_2[:, :, 10] = (meta >> 20) & 0b11 + meta_2[:, :, 11] = (meta >> 22) & 0b11 + meta_2[:, :, 12] = (meta >> 24) & 0b11 + meta_2[:, :, 13] = (meta >> 26) & 0b11 + meta_2[:, :, 14] = (meta >> 28) & 0b11 + meta_2[:, :, 15] = (meta >> 30) & 0b11 + + dense_offsets = meta_2.view(-1) + ( + torch.arange(0, 2 * m * k // ksparse, device=device) * 4).view( + -1, 1).repeat(1, 2).view(-1) + + dense = torch.zeros((m * 2 * k, ), dtype=sparse.dtype, device=device) + if sparse.dtype != torch.float: + # dense.scatter_(0, dense_offsets, sparse.view(-1)) + dense.scatter_(0, dense_offsets, sparse.reshape(-1)) + else: + dense.view(torch.half).scatter_(0, dense_offsets, + sparse.view(torch.half).view(-1)) + + return dense.view(m, 2 * k) + + +def mask_creator(tensor): + """ + Class for creating N:M sparsity masks. + Masks will be created using the N:M ratio, where for every block of + M weights, N will be pruned based on ranked weight value. Each mask + will correspond to the given tensor. + + :param N: The number of weights in a group to keep + :param M: The size of a weight group + """ + N = 2 + M = 4 + + mask = None + # for i, tensor in enumerate(tensors): + if tensor.numel() % M != 0: + raise ValueError( + f"Tensor of size {tensor.shape} can't be evenly divided into " + f"{M} groups") + + num_groups = tensor.numel() // M + + # N:M sparsity for linear layers + tensor_temp = tensor.detach().abs().reshape(num_groups, M) + index = torch.argsort(tensor_temp, dim=1)[:, :int(M - N)] + + w_b = torch.ones(tensor_temp.shape, device=tensor_temp.device) + mask = w_b.scatter_(dim=1, index=index, value=0).reshape(tensor.shape) + + return mask diff --git a/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/utils/marlin_24_perms.py b/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/utils/marlin_24_perms.py new file mode 100644 index 0000000..93f65a2 --- /dev/null +++ b/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/utils/marlin_24_perms.py @@ -0,0 +1,60 @@ +"""This file is used for /tests and /benchmarks""" +from typing import Dict, List + +import numpy +import torch + + +# Precompute permutations for Marlin24 weight and scale shuffling # noqa: E501 +# +# Marlin works on [16*2,64] tiles. 
The goal of the permutations is to reorder the weight data so that it is compatible # noqa: E501 +# with the tensor-core format that is described here: +# https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#matrix-fragments-for-mma-m16n8k16-with-floating-point-type # noqa: E501 +# +# As a result of this reordering, the vector loads inside the kernel will get the data as it is needed for tensor-core # noqa: E501 +# (without the need to use ldmatrix instructions) # noqa: E501 +def get_perms_24(num_bits: int): + perm_list: List[int] = [] + for i in range(32): + perm1: List[int] = [] + col = i // 4 + col_o = col // 2 + for block in [0, 1]: + for row in [ + 2 * (i % 4), + 2 * (i % 4) + 1, + 2 * (i % 4 + 4), + 2 * (i % 4 + 4) + 1, + ]: + perm1.append(16 * row + col_o * 256 + 8 * (col % 2) + + 4 * block) + for j in range(4): + perm_list.extend([p + 1 * j for p in perm1]) + perm = numpy.array(perm_list) + + if num_bits == 4: + interleave = numpy.array([0, 2, 4, 6, 1, 3, 5, 7]) + elif num_bits == 8: + interleave = numpy.array([0, 2, 1, 3]) + else: + raise ValueError("num_bits must be 4 or 8, got {}".format(num_bits)) + + perm = perm.reshape((-1, len(interleave)))[:, interleave].ravel() + perm = torch.from_numpy(perm) + scale_perm: List[int] = [] + for i in range(8): + scale_perm.extend([i * 8 + j for j in [0, 4, 1, 5, 2, 6, 3, 7]]) + scale_perm_single: List[int] = [] + for i in range(8): + scale_perm_single.extend([8 * i + j for j in [0, 1, 2, 3, 4, 5, 6, 7]]) + return perm, scale_perm, scale_perm_single + + +marlin_24_perm: Dict[int, torch.Tensor] = {} +marlin_24_scale_perm: Dict[int, List[int]] = {} +marlin_24_scale_perm_single: Dict[int, List[int]] = {} +for num_bits in [4, 8]: + perm_24, scale_perm_24, scale_perm_single_24 = get_perms_24(num_bits) + marlin_24_perm[num_bits] = perm_24 + marlin_24_scale_perm[num_bits] = scale_perm_24 + marlin_24_scale_perm_single[num_bits] = scale_perm_single_24 diff --git a/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/utils/marlin_perms.py b/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/utils/marlin_perms.py new file mode 100644 index 0000000..db5e685 --- /dev/null +++ b/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/utils/marlin_perms.py @@ -0,0 +1,60 @@ +"""This file is used for /tests and /benchmarks""" +from typing import Dict, List + +import numpy +import torch + + +# Precompute permutations for Marlin weight and scale shuffling # noqa: E501 +# +# Marlin works on [16,64] tiles. The goal of the permutations is to reorder the weight data so that it is compatible # noqa: E501 +# with the tensor-core format that is described here: +# https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#matrix-fragments-for-mma-m16n8k16-with-floating-point-type # noqa: E501 +# +# As a result of this reordering, the vector loads inside the kernel will get the data as it is needed for tensor-core # noqa: E501 +# (without the need to use ldmatrix instructions) # noqa: E501 +def get_perms(num_bits: int): + perm_list: List[int] = [] + for i in range(32): + perm1: List[int] = [] + col = i // 4 + for block in [0, 1]: + for row in [ + 2 * (i % 4), + 2 * (i % 4) + 1, + 2 * (i % 4 + 4), + 2 * (i % 4 + 4) + 1, + ]: + perm1.append(16 * row + col + 8 * block) + for j in range(4): + perm_list.extend([p + 256 * j for p in perm1]) + + perm = numpy.array(perm_list) + + if num_bits == 4: + interleave = numpy.array([0, 2, 4, 6, 1, 3, 5, 7]) + elif num_bits == 8: + interleave = numpy.array([0, 2, 1, 3]) + else: + raise ValueError("num_bits must be 4 or 8, got {}".format(num_bits)) + + perm = perm.reshape((-1, len(interleave)))[:, interleave].ravel() + perm = torch.from_numpy(perm) + scale_perm: List[int] = [] + for i in range(8): + scale_perm.extend([i + 8 * j for j in range(8)]) + scale_perm_single: List[int] = [] + for i in range(4): + scale_perm_single.extend( + [2 * i + j for j in [0, 1, 8, 9, 16, 17, 24, 25]]) + return perm, scale_perm, scale_perm_single + + +marlin_perm: Dict[int, torch.Tensor] = {} +marlin_scale_perm: Dict[int, List[int]] = {} +marlin_scale_perm_single: Dict[int, List[int]] = {} +for num_bits in [4, 8]: + perm, scale_perm, scale_perm_single = get_perms(num_bits) + marlin_perm[num_bits] = perm + marlin_scale_perm[num_bits] = scale_perm + marlin_scale_perm_single[num_bits] = scale_perm_single diff --git a/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/utils/marlin_utils.py b/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/utils/marlin_utils.py new file mode 100644 index 0000000..7b0398f --- /dev/null +++ b/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/utils/marlin_utils.py @@ -0,0 +1,232 @@ +"""This file is used for /tests and /benchmarks""" +import random + +import numpy +import torch + +from ktransformers.ktransformers_ext.operators.custom_marlin.quantize.utils.format_24 import ( + mask_creator, sparse_semi_structured_from_dense_cutlass) +from ktransformers.ktransformers_ext.operators.custom_marlin.quantize.utils.marlin_24_perms import ( + marlin_24_perm, marlin_24_scale_perm, marlin_24_scale_perm_single) +from ktransformers.ktransformers_ext.operators.custom_marlin.quantize.utils.marlin_perms import ( + marlin_perm, marlin_scale_perm, marlin_scale_perm_single) +from ktransformers.ktransformers_ext.operators.custom_marlin.quantize.utils.quant_utils import ( + get_pack_factor, quantize_weights, sort_weights) + +__cuda_arch = torch.cuda.get_device_capability() + +MARLIN_TILE = 16 + +GPTQ_MARLIN_TILE = 16 +GPTQ_MARLIN_MIN_THREAD_N = 64 +GPTQ_MARLIN_MIN_THREAD_K = 128 +GPTQ_MARLIN_MAX_PARALLEL = 16 + +GPTQ_MARLIN_SUPPORTED_NUM_BITS = [4, 8] +GPTQ_MARLIN_SUPPORTED_GROUP_SIZES = [-1, 32, 64, 128] +GPTQ_MARLIN_SUPPORTED_SYM = [True] + +def is_marlin_supported(): + return __cuda_arch[0] >= 8 + + +def marlin_permute_weights(q_w, size_k, size_n, perm, tile=MARLIN_TILE): + assert q_w.shape == (size_k, size_n) + assert size_k % tile == 0, f"size_k = {size_k}, tile = {tile}" + assert size_n % tile == 0, f"size_n = 
{size_n}, tile = {tile}" + + # Permute weights to 16x64 marlin tiles + q_w = q_w.reshape((size_k // tile, tile, size_n // tile, tile)) + q_w = q_w.permute((0, 2, 1, 3)) + q_w = q_w.reshape((size_k // tile, size_n * tile)) + + q_w = q_w.reshape((-1, perm.numel()))[:, perm].reshape(q_w.shape) + + return q_w + + +def marlin_weights(q_w, size_k, size_n, num_bits, perm): + # Permute + q_w = marlin_permute_weights(q_w, size_k, size_n, perm) + + # Pack + pack_factor = get_pack_factor(num_bits) + orig_device = q_w.device + + q_w = q_w.cpu().numpy().astype(numpy.uint32) + + q_packed = numpy.zeros((q_w.shape[0], q_w.shape[1] // pack_factor), + dtype=numpy.uint32) + for i in range(pack_factor): + q_packed |= q_w[:, i::pack_factor] << num_bits * i + + q_packed = torch.from_numpy(q_packed.astype(numpy.int32)).to(orig_device) + + return q_packed + + +def marlin_permute_scales(s, size_k, size_n, group_size, scale_perm, + scale_perm_single): + if group_size < size_k and group_size != -1: + s = s.reshape((-1, len(scale_perm)))[:, scale_perm] + else: + s = s.reshape((-1, len(scale_perm_single)))[:, scale_perm_single] + s = s.reshape((-1, size_n)).contiguous() + + return s + + +def marlin_quantize( + w: torch.Tensor, + num_bits: int, + group_size: int, + act_order: bool, +): + size_k, size_n = w.shape + + # Normalize group_size + if group_size == -1: + group_size = size_k + assert group_size <= size_k + + # Quantize (and apply act_order if provided) + w_ref, q_w, s, g_idx, rand_perm = quantize_weights(w, num_bits, group_size, + act_order) + + # For act_order, sort the "weights" and "g_idx" so that group ids are + # increasing + sort_indices = torch.empty(0, dtype=torch.int, device=w.device) + if act_order: + q_w, g_idx, sort_indices = sort_weights(q_w, g_idx) + + # Reformat to marlin + marlin_q_w = marlin_weights(q_w, size_k, size_n, num_bits, + marlin_perm[num_bits]) + marlin_s = marlin_permute_scales(s, size_k, size_n, group_size, + marlin_scale_perm[num_bits], + marlin_scale_perm_single[num_bits]) + + # Create result + res_list = [w_ref, marlin_q_w, marlin_s, g_idx, sort_indices, rand_perm] + for i in range(len(res_list)): + res_list[i] = res_list[i].to(w.device) + + return res_list + + +def inject_24(w, size_k, size_n): + assert w.shape == (size_k, size_n) + + mask = mask_creator(w.t()).t().cuda().bool() + + return (mask * w).contiguous(), mask.contiguous() + + +def check_24(w, num_rows_to_sample=50, _verbose=False): + BLOCK_SIZE = 4 + MAX_NON_ZEROS = 2 + + w = w.t().contiguous() + + print("check_24: w.shape = {}".format(w.shape)) + + num_rows, num_cols = w.shape + sampled_row_idxs = random.choices(range(num_rows), k=num_rows_to_sample) + if _verbose: + print(f"Sampled row idxs = {sampled_row_idxs}") + + total_segments = 0 + non_24_segments = 0 + for i in sampled_row_idxs: + for j in range(0, num_cols - BLOCK_SIZE, BLOCK_SIZE): + total_segments += 1 + block = w[i, j:j + BLOCK_SIZE] + num_nonzero = torch.count_nonzero(block) + if num_nonzero > MAX_NON_ZEROS: + print("i = {} j = {} block = {}".format(i, j, block)) + non_24_segments += 1 + + print(f"{non_24_segments} / {total_segments} do not have 2:4 structure.") + + +def compress_quantized_24_weight(q_24, size_k, size_n, num_bits): + assert q_24.shape == (size_k, size_n) + + # Remove zp to normalize over 0 + max_q_val = (1 << num_bits) - 1 + zp = (max_q_val + 1) // 2 + q_24_no_zp = q_24 - zp + + # Compress + q_24_no_zp = q_24_no_zp.t().contiguous() + q_24_no_zp_comp, meta = sparse_semi_structured_from_dense_cutlass( + q_24_no_zp) + q_24_no_zp_comp = 
q_24_no_zp_comp.t().contiguous() + + # Restore zp + q_24_comp = q_24_no_zp_comp + zp + + # Resize meta to its actual shape (without moving any data) + meta = meta.resize_(meta.shape[1] // 2, meta.shape[0] * 2) + + return q_24_comp, meta + + +def marlin_24_quantize( + w: torch.Tensor, + num_bits: int, + group_size: int, +): + size_k, size_n = w.shape + + # Normalize group_size + if group_size == -1: + group_size = size_k + assert group_size <= size_k + + # Inject 2:4 sparsity + w_24, mask_24 = inject_24(w, size_k, size_n) + + # Quantize + w_24_ref, q_w_24, s, g_idx, rand_perm = quantize_weights(w_24, + num_bits, + group_size, + act_order=False) + + # Compress quantized weight + q_w_24_comp, meta = compress_quantized_24_weight(q_w_24, size_k, size_n, + num_bits) + size_k_comp = size_k // 2 + + # Reformat to marlin + marlin_24_q_w_comp = marlin_weights(q_w_24_comp, size_k_comp, size_n, + num_bits, marlin_24_perm[num_bits]) + marlin_24_s = marlin_permute_scales(s, size_k, size_n, group_size, + marlin_24_scale_perm[num_bits], + marlin_24_scale_perm_single[num_bits]) + + # Create result + res_list = [w_24_ref, marlin_24_q_w_comp, meta, marlin_24_s] + for i in range(len(res_list)): + res_list[i] = res_list[i].to(w.device) + + return res_list + + +def compute_max_diff(output, output_ref): + return torch.mean(torch.abs(output - output_ref)) / torch.mean( + torch.abs(output_ref)) + + +class MarlinWorkspace: + + def __init__(self, out_features, min_thread_n, max_parallel): + assert (out_features % min_thread_n == 0), ( + "out_features = {} is undivisible by min_thread_n = {}".format( + out_features, min_thread_n)) + + max_workspace_size = ((out_features // min_thread_n) * max_parallel) + + self.scratch = torch.zeros(max_workspace_size, + dtype=torch.int, + device="cuda") diff --git a/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/utils/quant_utils.py b/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/utils/quant_utils.py new file mode 100644 index 0000000..b3a0ba5 --- /dev/null +++ b/ktransformers/ktransformers_ext/operators/custom_marlin/quantize/utils/quant_utils.py @@ -0,0 +1,146 @@ +"""This file is used for /tests and /benchmarks""" +import numpy +import torch + +SUPPORTED_NUM_BITS = [4, 8] +SUPPORTED_GROUP_SIZES = [-1, 32, 64, 128] + + +def get_pack_factor(num_bits): + assert num_bits in SUPPORTED_NUM_BITS, f"Unsupported num_bits = {num_bits}" + return 32 // num_bits + + +def permute_rows(q_w: torch.Tensor, w_ref: torch.Tensor, group_size: int): + assert q_w.shape == w_ref.shape + + orig_device = q_w.device + k_size, _ = q_w.shape + + g_idx = torch.zeros((k_size, ), dtype=torch.int32) + for i in range(k_size): + g_idx[i] = i // group_size + + # Simulate act_order by doing a random permutation on K + rand_perm = torch.randperm(k_size) + + g_idx = g_idx[rand_perm].contiguous() + q_w = q_w[rand_perm, :].contiguous() + w_ref = w_ref[rand_perm, :].contiguous() + + return ( + w_ref.to(device=orig_device), + q_w.to(device=orig_device), + g_idx.to(device=orig_device), + rand_perm.to(device=orig_device), + ) + + +def quantize_weights(w: torch.Tensor, num_bits: int, group_size: int, + act_order: bool): + orig_device = w.device + size_k, size_n = w.shape + + assert w.is_floating_point(), "w must be float" + assert num_bits in SUPPORTED_NUM_BITS, f"Unsupported num_bits = {num_bits}" + assert group_size in SUPPORTED_GROUP_SIZES + [ + size_k + ], f"Unsupported groupsize = {group_size}" + + if group_size == -1: + group_size = size_k + assert group_size <= size_k + + max_q_val 
= 2**num_bits - 1 + half_q_val = (max_q_val + 1) // 2 + + # Reshape to [groupsize, -1] + if group_size < size_k: + w = w.view((-1, group_size, size_n)) + w = w.permute(1, 0, 2) + w = w.reshape((group_size, -1)) + + # Compute scale for each group + s = torch.max(torch.abs(w), 0, keepdim=True)[0] + s *= 2 / max_q_val # 2 => symmetric + + # Quantize + q_w = torch.round(w / s).int() + q_w += half_q_val + q_w = torch.clamp(q_w, 0, max_q_val) + + # Compute ref (dequantized) + w_ref = (q_w - half_q_val).half() * s + + # Restore original shapes + if group_size < size_k: + + def reshape_w(w): + w = w.reshape((group_size, -1, size_n)) + w = w.permute(1, 0, 2) + w = w.reshape((size_k, size_n)).contiguous() + return w + + q_w = reshape_w(q_w) + w_ref = reshape_w(w_ref) + + s = s.reshape((-1, size_n)).contiguous() + + # Apply act_order + g_idx = torch.empty(0, dtype=torch.int, device=w.device) + rand_perm = torch.empty(0, dtype=torch.int, device=w.device) + if act_order: + assert ( + group_size < size_k + ), "For act_order, groupsize = {} must be less than size_k = {}".format( + group_size, size_k) + + w_ref, q_w, g_idx, rand_perm = permute_rows(q_w, w_ref, group_size) + + return ( + w_ref.to(device=orig_device), + q_w.to(device=orig_device), + s.to(device=orig_device), + g_idx.to(device=orig_device), + rand_perm.to(device=orig_device), + ) + + +def sort_weights(q_w: torch.Tensor, g_idx: torch.Tensor): + orig_device = q_w.device + + sort_indices = torch.argsort(g_idx).to( + dtype=torch.int32) # Sort based on g_idx + + g_idx = g_idx[sort_indices].contiguous() + q_w = q_w[sort_indices, :].contiguous() + + return ( + q_w.to(device=orig_device), + g_idx.to(device=orig_device), + sort_indices.to(device=orig_device), + ) + + +def gptq_pack( + q_w: torch.Tensor, + num_bits: int, + size_k: int, + size_n: int, +): + assert q_w.shape == (size_k, size_n) + + pack_factor = get_pack_factor(num_bits) + assert size_k % pack_factor == 0 + + orig_device = q_w.device + + q_w = q_w.cpu().numpy().astype(numpy.uint32) + + q_res = numpy.zeros((size_k // pack_factor, size_n), dtype=numpy.uint32) + + for i in range(pack_factor): + q_res |= q_w[i::pack_factor, :] << num_bits * i + + q_res = torch.from_numpy(q_res.astype(numpy.int32)).to(orig_device) + return q_res diff --git a/ktransformers/ktransformers_ext/operators/llamafile/conversion.h b/ktransformers/ktransformers_ext/operators/llamafile/conversion.h new file mode 100644 index 0000000..2a194d6 --- /dev/null +++ b/ktransformers/ktransformers_ext/operators/llamafile/conversion.h @@ -0,0 +1,32 @@ +/** + * @Description : + * @Author : chenht2022 + * @Date : 2024-07-12 10:07:58 + * @Version : 1.0.0 + * @LastEditors : chenht2022 + * @LastEditTime : 2024-07-25 10:34:55 + * @Copyright (c) 2024 by KVCache.AI, All Rights Reserved. 
+ **/ +#ifndef CPUINFER_CONVERSION_H +#define CPUINFER_CONVERSION_H + +#include <cstring> +#include "llama.cpp/ggml.h" + +inline void to_float(const void* input, float* output, int size, ggml_type type) { + if (type == ggml_type::GGML_TYPE_F32) { + memcpy(output, input, size * sizeof(float)); + } else { + ggml_internal_get_type_traits(type).to_float(input, output, size); + } +} + +inline void from_float(const float* input, void* output, int size, ggml_type type) { + if (type == ggml_type::GGML_TYPE_F32) { + memcpy(output, input, size * sizeof(float)); + } else { + ggml_internal_get_type_traits(type).from_float(input, output, size); + } +} + +#endif \ No newline at end of file diff --git a/ktransformers/ktransformers_ext/operators/llamafile/linear.cpp b/ktransformers/ktransformers_ext/operators/llamafile/linear.cpp new file mode 100644 index 0000000..bf1935e --- /dev/null +++ b/ktransformers/ktransformers_ext/operators/llamafile/linear.cpp @@ -0,0 +1,47 @@ +/** + * @Description : + * @Author : chenht2022 + * @Date : 2024-07-12 10:07:58 + * @Version : 1.0.0 + * @LastEditors : chenht2022 + * @LastEditTime : 2024-07-25 10:34:58 + * @Copyright (c) 2024 by KVCache.AI, All Rights Reserved. + **/ +#include "linear.h" + +Linear::Linear(LinearConfig config) { + config_ = config; + proj_ = config_.proj; + + input_fp32_.resize(config_.input_size); + proj_input_.resize(config_.input_size * 4); + proj_output_.resize(config_.output_size); +} + +void Linear::warm_up(Backend* backend) { + std::vector<float> input_fp32(config_.input_size); + std::vector<uint8_t> input(config_.input_size * ggml_type_size(config_.hidden_type) / ggml_blck_size(config_.hidden_type)); + std::vector<uint8_t> output(config_.output_size * ggml_type_size(config_.hidden_type) / ggml_blck_size(config_.hidden_type)); + for (int i = 0; i < config_.input_size; i++) { + input_fp32[i] = 0; + } + from_float(input_fp32.data(), input.data(), config_.input_size, config_.hidden_type); + forward(input.data(), output.data(), backend); +} + +void Linear::forward(const void* input, void* output, Backend* backend) { + const void* proj_input_ptr; + if (config_.hidden_type == ggml_internal_get_type_traits(config_.proj_type).vec_dot_type) { + proj_input_ptr = input; + } else { + to_float(input, input_fp32_.data(), config_.input_size, config_.hidden_type); + from_float(input_fp32_.data(), proj_input_.data(), config_.input_size, ggml_internal_get_type_traits(config_.proj_type).vec_dot_type); + proj_input_ptr = proj_input_.data(); + } + int nth = config_.output_size / config_.stride; + backend->do_work_stealing_job(nth, [&](int task_id) { + int ith = task_id % nth; + llamafile_sgemm(config_.output_size, 1, config_.input_size / ggml_blck_size(config_.proj_type), proj_, config_.input_size / ggml_blck_size(config_.proj_type), proj_input_ptr, config_.input_size / ggml_blck_size(config_.proj_type), proj_output_.data(), config_.output_size, ith, nth, GGML_TASK_TYPE_COMPUTE, config_.proj_type, ggml_internal_get_type_traits(config_.proj_type).vec_dot_type, GGML_TYPE_F32, GGML_PREC_DEFAULT); + }); + from_float(proj_output_.data(), output, config_.output_size, config_.hidden_type); +} \ No newline at end of file diff --git a/ktransformers/ktransformers_ext/operators/llamafile/linear.h b/ktransformers/ktransformers_ext/operators/llamafile/linear.h new file mode 100644 index 0000000..4285551 --- /dev/null +++ b/ktransformers/ktransformers_ext/operators/llamafile/linear.h @@ -0,0 +1,55 @@ +/** + * @Description : + * @Author : chenht2022 + * @Date : 2024-07-12 10:07:58 + * @Version : 1.0.0 + * 
@LastEditors : chenht2022 + * @LastEditTime : 2024-07-25 10:35:00 + * @Copyright (c) 2024 by KVCache.AI, All Rights Reserved. + **/ +#ifndef CPUINFER_OPERATOR_LINEAR_H +#define CPUINFER_OPERATOR_LINEAR_H + +#include +#include +#include +#include +#include + +#include "../../cpu_backend/backend.h" +#include "conversion.h" +#include "llama.cpp/ggml-impl.h" +#include "llama.cpp/ggml-quants.h" +#include "llama.cpp/ggml.h" +#include "llamafile/sgemm.h" + +struct LinearConfig { + int input_size; + int output_size; + int stride; + void* proj; + ggml_type proj_type; + ggml_type hidden_type; + + LinearConfig() {} + + LinearConfig(int input_size, int output_size, int stride, void* proj, ggml_type proj_type, ggml_type hidden_type) + : input_size(input_size), output_size(output_size), stride(stride), proj(proj), proj_type(proj_type), hidden_type(hidden_type) {} +}; + +class Linear { + public: + Linear(LinearConfig); + void warm_up(Backend* backend); + void forward(const void* input, void* output, Backend* backend); + + private: + LinearConfig config_; + void* proj_; // [output_size * input_size ( /32 if quantized)] + + std::vector input_fp32_; // [input_size] + std::vector proj_input_; // [input_size * 4] + std::vector proj_output_; // [output_size] +}; + +#endif \ No newline at end of file diff --git a/ktransformers/ktransformers_ext/operators/llamafile/mlp.cpp b/ktransformers/ktransformers_ext/operators/llamafile/mlp.cpp new file mode 100644 index 0000000..632c210 --- /dev/null +++ b/ktransformers/ktransformers_ext/operators/llamafile/mlp.cpp @@ -0,0 +1,103 @@ +/** + * @Description : + * @Author : chenht2022 + * @Date : 2024-07-16 10:43:18 + * @Version : 1.0.0 + * @LastEditors : chenht2022 + * @LastEditTime : 2024-07-25 10:35:04 + * @Copyright (c) 2024 by KVCache.AI, All Rights Reserved. 
+ **/ +#include "mlp.h" + +MLP::MLP(MLPConfig config) { + config_ = config; + gate_proj_ = config_.gate_proj; + up_proj_ = config_.up_proj; + down_proj_ = config_.down_proj; + + input_fp32_.resize(config_.hidden_size); + gate_input_.resize(config_.hidden_size * 4); + up_input_.resize(config_.hidden_size * 4); + gate_output_.resize(config_.intermediate_size); + up_output_.resize(config_.intermediate_size); + intermediate_fp32_.resize(config_.intermediate_size); + down_input_.resize(config_.intermediate_size * 4); + down_output_.resize(config_.hidden_size); +} + +void MLP::warm_up(Backend* backend) { + std::vector input_fp32(config_.hidden_size); + std::vector input(config_.hidden_size * ggml_type_size(config_.hidden_type) / ggml_blck_size(config_.hidden_type)); + std::vector output(config_.hidden_size * ggml_type_size(config_.hidden_type) / ggml_blck_size(config_.hidden_type)); + for (int i = 0; i < config_.hidden_size; i++) { + input_fp32[i] = 0; + } + from_float(input_fp32.data(), input.data(), config_.hidden_size, config_.hidden_type); + forward(input.data(), output.data(), backend); +} + +static float act_fn(float x) { + return x / (1.0f + expf(-x)); +} + +void MLP::forward(const void* input, void* output, Backend* backend) { + const void* gate_input_ptr; + const void* up_input_ptr; + if (config_.hidden_type == ggml_internal_get_type_traits(config_.gate_type).vec_dot_type && config_.hidden_type == ggml_internal_get_type_traits(config_.up_type).vec_dot_type) { + gate_input_ptr = up_input_ptr = input; + } else { + to_float(input, input_fp32_.data(), config_.hidden_size, config_.hidden_type); + if (ggml_internal_get_type_traits(config_.gate_type).vec_dot_type == ggml_internal_get_type_traits(config_.up_type).vec_dot_type) { + from_float(input_fp32_.data(), gate_input_.data(), config_.hidden_size, ggml_internal_get_type_traits(config_.gate_type).vec_dot_type); + gate_input_ptr = up_input_ptr = gate_input_.data(); + } else { + if (config_.hidden_type != ggml_internal_get_type_traits(config_.gate_type).vec_dot_type) { + from_float(input_fp32_.data(), gate_input_.data(), config_.hidden_size, ggml_internal_get_type_traits(config_.gate_type).vec_dot_type); + gate_input_ptr = gate_input_.data(); + } else { + gate_input_ptr = input; + } + if (config_.hidden_type != ggml_internal_get_type_traits(config_.up_type).vec_dot_type) { + from_float(input_fp32_.data(), up_input_.data(), config_.hidden_size, ggml_internal_get_type_traits(config_.up_type).vec_dot_type); + up_input_ptr = up_input_.data(); + } else { + up_input_ptr = input; + } + } + } + int nth = config_.intermediate_size / config_.stride; + backend->do_work_stealing_job(nth, [&](int task_id) { + int ith = task_id; + void* gate_proj_ptr = gate_proj_ + ith * config_.stride * config_.hidden_size * ggml_type_size(config_.gate_type) / ggml_blck_size(config_.gate_type); + float* gate_output_ptr = gate_output_.data() + ith * config_.stride; + llamafile_sgemm(config_.stride, 1, config_.hidden_size / ggml_blck_size(config_.gate_type), gate_proj_ptr, config_.hidden_size / ggml_blck_size(config_.gate_type), gate_input_ptr, config_.hidden_size / ggml_blck_size(config_.gate_type), gate_output_ptr, config_.stride, 0, 1, GGML_TASK_TYPE_COMPUTE, config_.gate_type, ggml_internal_get_type_traits(config_.gate_type).vec_dot_type, GGML_TYPE_F32, GGML_PREC_DEFAULT); + void* up_proj_ptr = up_proj_ + ith * config_.stride * config_.hidden_size * ggml_type_size(config_.up_type) / ggml_blck_size(config_.up_type); + float* up_output_ptr = up_output_.data() + ith * 
config_.stride; + llamafile_sgemm(config_.stride, 1, config_.hidden_size / ggml_blck_size(config_.up_type), up_proj_ptr, config_.hidden_size / ggml_blck_size(config_.up_type), up_input_ptr, config_.hidden_size / ggml_blck_size(config_.up_type), up_output_ptr, config_.stride, 0, 1, GGML_TASK_TYPE_COMPUTE, config_.up_type, ggml_internal_get_type_traits(config_.up_type).vec_dot_type, GGML_TYPE_F32, GGML_PREC_DEFAULT); + for (int i = ith * config_.stride; i < (ith + 1) * config_.stride; i++) { + intermediate_fp32_[i] = act_fn(gate_output_[i]) * up_output_[i]; + } + if (config_.stride % ggml_blck_size(ggml_internal_get_type_traits(config_.down_type).vec_dot_type) == 0) { + float* intermediate_fp32_ptr = intermediate_fp32_.data() + ith * config_.stride; + void* down_input_ptr = down_input_.data() + ith * config_.stride * ggml_type_size(ggml_internal_get_type_traits(config_.down_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.down_type).vec_dot_type); + from_float(intermediate_fp32_ptr, down_input_ptr, config_.stride, ggml_internal_get_type_traits(config_.down_type).vec_dot_type); + } + }); + if (config_.stride % ggml_blck_size(ggml_internal_get_type_traits(config_.down_type).vec_dot_type) != 0) { + from_float(intermediate_fp32_.data(), down_input_.data(), config_.intermediate_size, ggml_internal_get_type_traits(config_.down_type).vec_dot_type); + } + nth = config_.hidden_size / config_.stride; + backend->do_work_stealing_job(nth, [&](int task_id) { + int ith = task_id; + void* down_proj_ptr = down_proj_ + ith * config_.stride * config_.intermediate_size * ggml_type_size(config_.down_type) / ggml_blck_size(config_.down_type); + float* down_output_ptr = down_output_.data() + ith * config_.stride; + llamafile_sgemm(config_.stride, 1, config_.intermediate_size / ggml_blck_size(config_.down_type), down_proj_ptr, config_.intermediate_size / ggml_blck_size(config_.down_type), down_input_.data(), config_.intermediate_size / ggml_blck_size(config_.down_type), down_output_ptr, config_.stride, 0, 1, GGML_TASK_TYPE_COMPUTE, config_.down_type, ggml_internal_get_type_traits(config_.down_type).vec_dot_type, GGML_TYPE_F32, GGML_PREC_DEFAULT); + if (config_.stride % ggml_blck_size(config_.hidden_type) == 0) { + void* output_ptr = output + ith * config_.stride * ggml_type_size(config_.hidden_type) / ggml_blck_size(config_.hidden_type); + from_float(down_output_ptr, output_ptr, config_.stride, config_.hidden_type); + } + }); + if (config_.stride % ggml_blck_size(config_.hidden_type) != 0) { + from_float(down_output_.data(), output, config_.hidden_size, config_.hidden_type); + } +} diff --git a/ktransformers/ktransformers_ext/operators/llamafile/mlp.h b/ktransformers/ktransformers_ext/operators/llamafile/mlp.h new file mode 100644 index 0000000..604db77 --- /dev/null +++ b/ktransformers/ktransformers_ext/operators/llamafile/mlp.h @@ -0,0 +1,66 @@ +/** + * @Description : + * @Author : chenht2022 + * @Date : 2024-07-12 10:07:58 + * @Version : 1.0.0 + * @LastEditors : chenht2022 + * @LastEditTime : 2024-07-25 10:35:06 + * @Copyright (c) 2024 by KVCache.AI, All Rights Reserved. 
+ **/ +#ifndef CPUINFER_OPERATOR_MLP_H +#define CPUINFER_OPERATOR_MLP_H + +#include +#include +#include +#include +#include + +#include "../../cpu_backend/backend.h" +#include "conversion.h" +#include "llama.cpp/ggml-impl.h" +#include "llama.cpp/ggml-quants.h" +#include "llama.cpp/ggml.h" +#include "llamafile/sgemm.h" + +struct MLPConfig { + int hidden_size; + int intermediate_size; + int stride; + void* gate_proj; + void* up_proj; + void* down_proj; + ggml_type gate_type; + ggml_type up_type; + ggml_type down_type; + ggml_type hidden_type; + + MLPConfig() {} + + MLPConfig(int hidden_size, int intermediate_size, int stride, void* gate_proj, void* up_proj, void* down_proj, ggml_type gate_type, ggml_type up_type, ggml_type down_type, ggml_type hidden_type) + : hidden_size(hidden_size), intermediate_size(intermediate_size), stride(stride), gate_proj(gate_proj), up_proj(up_proj), down_proj(down_proj), gate_type(gate_type), up_type(up_type), down_type(down_type), hidden_type(hidden_type) {} +}; + +class MLP { + public: + MLP(MLPConfig); + void warm_up(Backend* backend); + void forward(const void* input, void* output, Backend* backend); + + private: + MLPConfig config_; + void* gate_proj_; // [intermediate_size * hidden_size ( /32 if quantized)] + void* up_proj_; // [intermediate_size * hidden_size ( /32 if quantized)] + void* down_proj_; // [hidden_size * intermediate_size ( /32 if quantized)] + + std::vector input_fp32_; // [hidden_size] + std::vector gate_input_; // [hidden_size * 4] + std::vector up_input_; // [hidden_size * 4] + std::vector gate_output_; // [intermediate_size] + std::vector up_output_; // [intermediate_size] + std::vector intermediate_fp32_; // [intermediate_size] + std::vector down_input_; // [intermediate_size * 4] + std::vector down_output_; // [hidden_size] +}; + +#endif \ No newline at end of file diff --git a/ktransformers/ktransformers_ext/operators/llamafile/moe.cpp b/ktransformers/ktransformers_ext/operators/llamafile/moe.cpp new file mode 100644 index 0000000..aaea4a7 --- /dev/null +++ b/ktransformers/ktransformers_ext/operators/llamafile/moe.cpp @@ -0,0 +1,310 @@ +/** + * @Description : + * @Author : chenht2022 + * @Date : 2024-07-22 02:03:22 + * @Version : 1.0.0 + * @LastEditors : chenht2022 + * @LastEditTime : 2024-07-25 10:35:07 + * @Copyright (c) 2024 by KVCache.AI, All Rights Reserved. 
+**/ +#include "moe.h" +#include +#include "unistd.h" + +void* MOE::buffer_ = nullptr; + +MOE::MOE(MOEConfig config) { + config_ = config; + gate_proj_ = config_.gate_proj; + up_proj_ = config_.up_proj; + down_proj_ = config_.down_proj; + + if (MOE::buffer_ == nullptr) { + uint64_t buffer_size = 0; + buffer_size += sizeof(float) * config_.group_max_len * config_.hidden_size; + buffer_size += config_.group_max_len * config_.hidden_size * ggml_type_size(ggml_internal_get_type_traits(config_.gate_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.gate_type).vec_dot_type); + buffer_size += config_.group_max_len * config_.hidden_size * ggml_type_size(ggml_internal_get_type_traits(config_.up_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.up_type).vec_dot_type); + buffer_size += config_.routed_expert_num * config_.group_max_len * config_.hidden_size * ggml_type_size(ggml_internal_get_type_traits(config_.gate_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.gate_type).vec_dot_type); + buffer_size += config_.routed_expert_num * config_.group_max_len * config_.hidden_size * ggml_type_size(ggml_internal_get_type_traits(config_.up_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.up_type).vec_dot_type); + buffer_size += sizeof(float) * config_.routed_expert_num * config_.group_max_len * config_.intermediate_size; + buffer_size += sizeof(float) * config_.routed_expert_num * config_.group_max_len * config_.intermediate_size; + buffer_size += sizeof(float) * config_.routed_expert_num * config_.group_max_len * config_.intermediate_size; + buffer_size += config_.routed_expert_num * config_.group_max_len * config_.intermediate_size * ggml_type_size(ggml_internal_get_type_traits(config_.down_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.down_type).vec_dot_type); + buffer_size += sizeof(float) * config_.routed_expert_num * config_.group_max_len * config_.hidden_size; + buffer_size += sizeof(float) * config_.group_max_len * config_.hidden_size; + buffer_ = malloc(buffer_size); + } + + uint64_t offset = 0; + s_input_fp32_ = (float*)(buffer_ + offset); + offset += sizeof(float) * config_.hidden_size; + s_gate_input_ = (uint8_t*)(buffer_ + offset); + offset += config_.hidden_size * ggml_type_size(ggml_internal_get_type_traits(config_.gate_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.gate_type).vec_dot_type); + s_up_input_ = (uint8_t*)(buffer_ + offset); + offset += config_.hidden_size * ggml_type_size(ggml_internal_get_type_traits(config_.up_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.up_type).vec_dot_type); + s_gate_output_.resize(config_.routed_expert_num); + s_up_output_.resize(config_.routed_expert_num); + s_intermediate_fp32_.resize(config_.routed_expert_num); + s_down_input_.resize(config_.routed_expert_num); + s_down_output_.resize(config_.routed_expert_num); + for (int i = 0; i < config_.routed_expert_num; i++) { + s_gate_output_[i] = (float*)(buffer_ + offset); + offset += sizeof(float) * config_.intermediate_size; + s_up_output_[i] = (float*)(buffer_ + offset); + offset += sizeof(float) * config_.intermediate_size; + s_intermediate_fp32_[i] = (float*)(buffer_ + offset); + offset += sizeof(float) * config_.intermediate_size; + s_down_input_[i] = (uint8_t*)(buffer_ + offset); + offset += config_.intermediate_size * ggml_type_size(ggml_internal_get_type_traits(config_.down_type).vec_dot_type) / 
ggml_blck_size(ggml_internal_get_type_traits(config_.down_type).vec_dot_type); + s_down_output_[i] = (float*)(buffer_ + offset); + offset += sizeof(float) * config_.hidden_size; + } + s_output_fp32_ = (float*)(buffer_ + offset); + + offset = 0; + m_input_fp32_.resize(config_.group_max_len); + m_gate_input_.resize(config_.group_max_len); + m_up_input_.resize(config_.group_max_len); + for (int i = 0; i < config_.group_max_len; i++) { + m_input_fp32_[i] = (float*)(buffer_ + offset); + offset += sizeof(float) * config_.hidden_size; + m_gate_input_[i] = (uint8_t*)(buffer_ + offset); + offset += config_.hidden_size * ggml_type_size(ggml_internal_get_type_traits(config_.gate_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.gate_type).vec_dot_type); + m_up_input_[i] = (uint8_t*)(buffer_ + offset); + offset += config_.hidden_size * ggml_type_size(ggml_internal_get_type_traits(config_.up_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.up_type).vec_dot_type); + } + m_local_gate_input_ = (uint8_t*)(buffer_ + offset); + offset += config_.routed_expert_num * config_.group_max_len * config_.hidden_size * ggml_type_size(ggml_internal_get_type_traits(config_.gate_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.gate_type).vec_dot_type); + m_local_up_input_ = (uint8_t*)(buffer_ + offset); + offset += config_.routed_expert_num * config_.group_max_len * config_.hidden_size * ggml_type_size(ggml_internal_get_type_traits(config_.up_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.up_type).vec_dot_type); + m_local_gate_output_ = (float*)(buffer_ + offset); + offset += sizeof(float) * config_.routed_expert_num * config_.group_max_len * config_.intermediate_size; + m_local_up_output_ = (float*)(buffer_ + offset); + offset += sizeof(float) * config_.routed_expert_num * config_.group_max_len * config_.intermediate_size; + m_local_intermediate_fp32_ = (float*)(buffer_ + offset); + offset += sizeof(float) * config_.routed_expert_num * config_.group_max_len * config_.intermediate_size; + m_local_down_input_ = (uint8_t*)(buffer_ + offset); + offset += config_.routed_expert_num * config_.group_max_len * config_.intermediate_size * ggml_type_size(ggml_internal_get_type_traits(config_.down_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.down_type).vec_dot_type); + m_local_down_output_ = (float*)(buffer_ + offset); + offset += sizeof(float) * config_.routed_expert_num * config_.group_max_len * config_.hidden_size; + m_output_fp32_.resize(config_.group_max_len); + for (int i = 0; i < config_.group_max_len; i++) { + m_output_fp32_[i] = (float*)(buffer_ + offset); + offset += sizeof(float) * config_.hidden_size; + } + + m_local_pos_.resize(config_.group_max_len); + for (int i = 0; i < config_.group_max_len; i++) { + m_local_pos_[i].reserve(config_.expert_num); + } + m_local_num_.resize(config_.expert_num); + m_local_gate_input_ptr_.resize(config_.expert_num); + m_local_up_input_ptr_.resize(config_.expert_num); + m_local_gate_output_ptr_.resize(config_.expert_num); + m_local_up_output_ptr_.resize(config_.expert_num); + m_local_intermediate_fp32_ptr_.resize(config_.expert_num); + m_local_down_input_ptr_.resize(config_.expert_num); + m_local_down_output_ptr_.resize(config_.expert_num); +} + +void MOE::warm_up(Backend* backend) { + std::vector input_fp32(config_.hidden_size); + std::vector input(config_.hidden_size * ggml_type_size(config_.hidden_type) / ggml_blck_size(config_.hidden_type)); + 
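+ // Hedged note (an assumption, not stated in the source): this warm-up pushes + // a zero vector through every expert once via forward_one, so each expert's + // weights are touched and paged into memory before the first real request. + 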
+    std::vector<uint8_t> output(config_.hidden_size * ggml_type_size(config_.hidden_type) / ggml_blck_size(config_.hidden_type));
+    for (int i = 0; i < config_.hidden_size; i++) {
+        input_fp32[i] = 0;
+    }
+    from_float(input_fp32.data(), input.data(), config_.hidden_size, config_.hidden_type);
+    for (int i = 0; i < config_.expert_num; i++) {
+        uint64_t expert_ids = i;
+        float weights = 0;
+        forward_one(1, &expert_ids, &weights, input.data(), output.data(), backend);
+    }
+}
+
+// SiLU activation: x * sigmoid(x)
+static float act_fn(float x) {
+    return x / (1.0f + expf(-x));
+}
+
+void MOE::forward_one(int k, const uint64_t* expert_ids, const float* weights, const void* input, void* output, Backend* backend) {
+    const void* gate_input_ptr;
+    const void* up_input_ptr;
+    if (config_.hidden_type == ggml_internal_get_type_traits(config_.gate_type).vec_dot_type && config_.hidden_type == ggml_internal_get_type_traits(config_.up_type).vec_dot_type) {
+        gate_input_ptr = up_input_ptr = input;
+    } else {
+        to_float(input, s_input_fp32_, config_.hidden_size, config_.hidden_type);
+        if (ggml_internal_get_type_traits(config_.gate_type).vec_dot_type == ggml_internal_get_type_traits(config_.up_type).vec_dot_type) {
+            from_float(s_input_fp32_, s_gate_input_, config_.hidden_size, ggml_internal_get_type_traits(config_.gate_type).vec_dot_type);
+            gate_input_ptr = up_input_ptr = s_gate_input_;
+        } else {
+            if (config_.hidden_type != ggml_internal_get_type_traits(config_.gate_type).vec_dot_type) {
+                from_float(s_input_fp32_, s_gate_input_, config_.hidden_size, ggml_internal_get_type_traits(config_.gate_type).vec_dot_type);
+                gate_input_ptr = s_gate_input_;
+            } else {
+                gate_input_ptr = input;
+            }
+            if (config_.hidden_type != ggml_internal_get_type_traits(config_.up_type).vec_dot_type) {
+                from_float(s_input_fp32_, s_up_input_, config_.hidden_size, ggml_internal_get_type_traits(config_.up_type).vec_dot_type);
+                up_input_ptr = s_up_input_;
+            } else {
+                up_input_ptr = input;
+            }
+        }
+    }
+    int nth = config_.intermediate_size / config_.stride;
+    backend->do_work_stealing_job(nth * k, [&](int task_id) {
+        int expert_idx = task_id / nth;
+        uint64_t expert_id = expert_ids[expert_idx];
+        int ith = task_id % nth;
+        void* gate_proj_ptr = gate_proj_ + (expert_id * config_.intermediate_size + ith * config_.stride) * config_.hidden_size * ggml_type_size(config_.gate_type) / ggml_blck_size(config_.gate_type);
+        float* gate_output_ptr = s_gate_output_[expert_idx] + ith * config_.stride;
+        llamafile_sgemm(config_.stride, 1, config_.hidden_size / ggml_blck_size(config_.gate_type), gate_proj_ptr, config_.hidden_size / ggml_blck_size(config_.gate_type), gate_input_ptr, config_.hidden_size / ggml_blck_size(config_.gate_type), gate_output_ptr, config_.stride, 0, 1, GGML_TASK_TYPE_COMPUTE, config_.gate_type, ggml_internal_get_type_traits(config_.gate_type).vec_dot_type, GGML_TYPE_F32, GGML_PREC_DEFAULT);
+        void* up_proj_ptr = up_proj_ + (expert_id * config_.intermediate_size + ith * config_.stride) * config_.hidden_size * ggml_type_size(config_.up_type) / ggml_blck_size(config_.up_type);
+        float* up_output_ptr = s_up_output_[expert_idx] + ith * config_.stride;
+        llamafile_sgemm(config_.stride, 1, config_.hidden_size / ggml_blck_size(config_.up_type), up_proj_ptr, config_.hidden_size / ggml_blck_size(config_.up_type), up_input_ptr, config_.hidden_size / ggml_blck_size(config_.up_type), up_output_ptr, config_.stride, 0, 1, GGML_TASK_TYPE_COMPUTE, config_.up_type, ggml_internal_get_type_traits(config_.up_type).vec_dot_type, GGML_TYPE_F32, GGML_PREC_DEFAULT);
+        for (int i = ith *
config_.stride; i < (ith + 1) * config_.stride; i++) { + s_intermediate_fp32_[expert_idx][i] = act_fn(s_gate_output_[expert_idx][i]) * s_up_output_[expert_idx][i]; + } + if (config_.stride % ggml_blck_size(ggml_internal_get_type_traits(config_.down_type).vec_dot_type) == 0) { + float* intermediate_fp32_ptr = s_intermediate_fp32_[expert_idx] + ith * config_.stride; + void* down_input_ptr = s_down_input_[expert_idx] + ith * config_.stride * ggml_type_size(ggml_internal_get_type_traits(config_.down_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.down_type).vec_dot_type); + from_float(intermediate_fp32_ptr, down_input_ptr, config_.stride, ggml_internal_get_type_traits(config_.down_type).vec_dot_type); + } + }); + if (config_.stride % ggml_blck_size(ggml_internal_get_type_traits(config_.down_type).vec_dot_type) != 0) { + for (int i = 0; i < k; i++) { + from_float(s_intermediate_fp32_[i], s_down_input_[i], config_.intermediate_size, ggml_internal_get_type_traits(config_.down_type).vec_dot_type); + } + } + nth = config_.hidden_size / config_.stride; + backend->do_work_stealing_job(nth, [&](int task_id) { + int ith = task_id; + for (int i = ith * config_.stride; i < (ith + 1) * config_.stride; i++) { + s_output_fp32_[i] = 0; + } + for (int expert_idx = 0; expert_idx < k; expert_idx++) { + uint64_t expert_id = expert_ids[expert_idx]; + void* down_proj_ptr = down_proj_ + (expert_id * config_.hidden_size + ith * config_.stride) * config_.intermediate_size * ggml_type_size(config_.down_type) / ggml_blck_size(config_.down_type); + float* down_output_ptr = s_down_output_[expert_idx] + ith * config_.stride; + llamafile_sgemm(config_.stride, 1, config_.intermediate_size / ggml_blck_size(config_.down_type), down_proj_ptr, config_.intermediate_size / ggml_blck_size(config_.down_type), s_down_input_[expert_idx], config_.intermediate_size / ggml_blck_size(config_.down_type), down_output_ptr, config_.stride, 0, 1, GGML_TASK_TYPE_COMPUTE, config_.down_type, ggml_internal_get_type_traits(config_.down_type).vec_dot_type, GGML_TYPE_F32, GGML_PREC_DEFAULT); + for (int i = ith * config_.stride; i < (ith + 1) * config_.stride; i++) { + s_output_fp32_[i] += s_down_output_[expert_idx][i] * weights[expert_idx]; + } + } + if (config_.stride % ggml_blck_size(config_.hidden_type) == 0) { + float* output_fp32_ptr = s_output_fp32_ + ith * config_.stride; + void* output_ptr = output + ith * config_.stride * ggml_type_size(config_.hidden_type) / ggml_blck_size(config_.hidden_type); + from_float(output_fp32_ptr, output_ptr, config_.stride, config_.hidden_type); + } + }); + if (config_.stride % ggml_blck_size(config_.hidden_type) != 0) { + from_float(s_output_fp32_, output, config_.hidden_size, config_.hidden_type); + } +} + +void MOE::forward_many(int qlen, int k, const uint64_t* expert_ids, const float* weights, const void* input, void* output, Backend* backend) { + for (int i = 0; i < config_.expert_num; i++) { + m_local_num_[i] = 0; + } + for (int i = 0; i < qlen; i++) { + for (int j = 0; j < k; j++) { + m_local_pos_[i][j] = m_local_num_[expert_ids[i * k + j]]++; + } + } + uint64_t offset = 0; + for (int i = 0; i < config_.expert_num; i++) { + m_local_gate_input_ptr_[i] = m_local_gate_input_ + offset * config_.hidden_size * ggml_type_size(ggml_internal_get_type_traits(config_.gate_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.gate_type).vec_dot_type); + m_local_up_input_ptr_[i] = m_local_up_input_ + offset * config_.hidden_size * 
ggml_type_size(ggml_internal_get_type_traits(config_.up_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.up_type).vec_dot_type); + m_local_gate_output_ptr_[i] = m_local_gate_output_ + offset * config_.intermediate_size; + m_local_up_output_ptr_[i] = m_local_up_output_ + offset * config_.intermediate_size; + m_local_intermediate_fp32_ptr_[i] = m_local_intermediate_fp32_ + offset * config_.intermediate_size; + m_local_down_input_ptr_[i] = m_local_down_input_ + offset * config_.intermediate_size * ggml_type_size(ggml_internal_get_type_traits(config_.down_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.down_type).vec_dot_type); + m_local_down_output_ptr_[i] = m_local_down_output_ + offset * config_.hidden_size; + offset += m_local_num_[i]; + } + backend->do_work_stealing_job(qlen, [&](int i) { + const void* gate_input_ptr; + const void* up_input_ptr; + if (config_.hidden_type == ggml_internal_get_type_traits(config_.gate_type).vec_dot_type && config_.hidden_type == ggml_internal_get_type_traits(config_.up_type).vec_dot_type) { + gate_input_ptr = up_input_ptr = input + i * config_.hidden_size * ggml_type_size(config_.hidden_type) / ggml_blck_size(config_.hidden_type); + } else { + to_float(input + i * config_.hidden_size * ggml_type_size(config_.hidden_type) / ggml_blck_size(config_.hidden_type), m_input_fp32_[i], config_.hidden_size, config_.hidden_type); + if (ggml_internal_get_type_traits(config_.gate_type).vec_dot_type == ggml_internal_get_type_traits(config_.up_type).vec_dot_type) { + from_float(m_input_fp32_[i], m_gate_input_[i], config_.hidden_size, ggml_internal_get_type_traits(config_.gate_type).vec_dot_type); + gate_input_ptr = up_input_ptr = m_gate_input_[i]; + } else { + if (config_.hidden_type != ggml_internal_get_type_traits(config_.gate_type).vec_dot_type) { + from_float(m_input_fp32_[i], m_gate_input_[i], config_.hidden_size, ggml_internal_get_type_traits(config_.gate_type).vec_dot_type); + gate_input_ptr = m_gate_input_[i]; + } else { + gate_input_ptr = input + i * config_.hidden_size * ggml_type_size(config_.hidden_type) / ggml_blck_size(config_.hidden_type); + } + if (config_.hidden_type != ggml_internal_get_type_traits(config_.up_type).vec_dot_type) { + from_float(m_input_fp32_[i], m_up_input_[i], config_.hidden_size, ggml_internal_get_type_traits(config_.up_type).vec_dot_type); + up_input_ptr = m_up_input_[i]; + } else { + up_input_ptr = input + i * config_.hidden_size * ggml_type_size(config_.hidden_type) / ggml_blck_size(config_.hidden_type); + } + } + } + for (int j = 0; j < k; j++) { + memcpy(m_local_gate_input_ptr_[expert_ids[i * k + j]] + m_local_pos_[i][j] * config_.hidden_size * ggml_type_size(ggml_internal_get_type_traits(config_.gate_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.gate_type).vec_dot_type), gate_input_ptr, config_.hidden_size * ggml_type_size(ggml_internal_get_type_traits(config_.gate_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.gate_type).vec_dot_type)); + memcpy(m_local_up_input_ptr_[expert_ids[i * k + j]] + m_local_pos_[i][j] * config_.hidden_size * ggml_type_size(ggml_internal_get_type_traits(config_.up_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.up_type).vec_dot_type), up_input_ptr, config_.hidden_size * ggml_type_size(ggml_internal_get_type_traits(config_.up_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.up_type).vec_dot_type)); + } + }); + int stride = QK_K; + int nth = 
config_.intermediate_size / stride; + backend->do_work_stealing_job(nth * config_.expert_num, [&](int task_id) { + int expert_idx = task_id / nth; + int ith = task_id % nth; + void* gate_input_ptr = m_local_gate_input_ptr_[expert_idx]; + void* gate_proj_ptr = gate_proj_ + (expert_idx * config_.intermediate_size + ith * stride) * config_.hidden_size * ggml_type_size(config_.gate_type) / ggml_blck_size(config_.gate_type); + float* gate_output_ptr = m_local_gate_output_ptr_[expert_idx] + ith * stride; + llamafile_sgemm(stride, m_local_num_[expert_idx], config_.hidden_size / ggml_blck_size(config_.gate_type), gate_proj_ptr, config_.hidden_size / ggml_blck_size(config_.gate_type), gate_input_ptr, config_.hidden_size / ggml_blck_size(config_.gate_type), gate_output_ptr, config_.intermediate_size, 0, 1, GGML_TASK_TYPE_COMPUTE, config_.gate_type, ggml_internal_get_type_traits(config_.gate_type).vec_dot_type, GGML_TYPE_F32, GGML_PREC_DEFAULT); + void* up_input_ptr = m_local_up_input_ptr_[expert_idx]; + void* up_proj_ptr = up_proj_ + (expert_idx * config_.intermediate_size + ith * stride) * config_.hidden_size * ggml_type_size(config_.up_type) / ggml_blck_size(config_.up_type); + float* up_output_ptr = m_local_up_output_ptr_[expert_idx] + ith * stride; + llamafile_sgemm(stride, m_local_num_[expert_idx], config_.hidden_size / ggml_blck_size(config_.up_type), up_proj_ptr, config_.hidden_size / ggml_blck_size(config_.up_type), up_input_ptr, config_.hidden_size / ggml_blck_size(config_.up_type), up_output_ptr, config_.intermediate_size, 0, 1, GGML_TASK_TYPE_COMPUTE, config_.up_type, ggml_internal_get_type_traits(config_.up_type).vec_dot_type, GGML_TYPE_F32, GGML_PREC_DEFAULT); + for (int i = 0; i < m_local_num_[expert_idx]; i++) { + for (int j = ith * stride; j < (ith + 1) * stride; j++) { + m_local_intermediate_fp32_ptr_[expert_idx][i * config_.intermediate_size + j] = act_fn(m_local_gate_output_ptr_[expert_idx][i * config_.intermediate_size + j]) * m_local_up_output_ptr_[expert_idx][i * config_.intermediate_size + j]; + } + float* intermediate_fp32_ptr = m_local_intermediate_fp32_ptr_[expert_idx] + i * config_.intermediate_size + ith * stride; + void* down_input_ptr = m_local_down_input_ptr_[expert_idx] + i * config_.intermediate_size * ggml_type_size(ggml_internal_get_type_traits(config_.down_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.down_type).vec_dot_type) + ith * stride * ggml_type_size(ggml_internal_get_type_traits(config_.down_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(config_.down_type).vec_dot_type); + from_float(intermediate_fp32_ptr, down_input_ptr, stride, ggml_internal_get_type_traits(config_.down_type).vec_dot_type); + } + }); + stride = QK_K; + nth = config_.hidden_size / stride; + backend->do_work_stealing_job(nth * config_.expert_num, [&](int task_id) { + int expert_idx = task_id / nth; + int ith = task_id % nth; + void* down_input_ptr = m_local_down_input_ptr_[expert_idx]; + void* down_proj_ptr = down_proj_ + (expert_idx * config_.hidden_size + ith * stride) * config_.intermediate_size * ggml_type_size(config_.down_type) / ggml_blck_size(config_.down_type); + float* down_output_ptr = m_local_down_output_ptr_[expert_idx] + ith * stride; + llamafile_sgemm(stride, m_local_num_[expert_idx], config_.intermediate_size / ggml_blck_size(config_.down_type), down_proj_ptr, config_.intermediate_size / ggml_blck_size(config_.down_type), down_input_ptr, config_.intermediate_size / ggml_blck_size(config_.down_type), down_output_ptr, 
config_.hidden_size, 0, 1, GGML_TASK_TYPE_COMPUTE, config_.down_type, ggml_internal_get_type_traits(config_.down_type).vec_dot_type, GGML_TYPE_F32, GGML_PREC_DEFAULT); + }); + backend->do_work_stealing_job(qlen, [&](int i) { + for (int e = 0; e < config_.hidden_size; e++) { + m_output_fp32_[i][e] = 0; + } + for (int j = 0; j < k; j++) { + for (int e = 0; e < config_.hidden_size; e++) { + m_output_fp32_[i][e] += m_local_down_output_ptr_[expert_ids[i * k + j]][m_local_pos_[i][j] * config_.hidden_size + e] * weights[i * k + j]; + } + } + from_float(m_output_fp32_[i], output + i * config_.hidden_size * ggml_type_size(config_.hidden_type) / ggml_blck_size(config_.hidden_type), config_.hidden_size, config_.hidden_type); + }); +} + +void MOE::forward(int qlen, int k, const uint64_t* expert_ids, const float* weights, const void* input, void* output, Backend* backend) { + if (qlen < config_.group_min_len) { + for (int i = 0; i < qlen; i++) { + forward_one(k, expert_ids + i * k, weights + i * k, input + i * config_.hidden_size * ggml_type_size(config_.hidden_type) / ggml_blck_size(config_.hidden_type), output + i * config_.hidden_size * ggml_type_size(config_.hidden_type) / ggml_blck_size(config_.hidden_type), backend); + } + return; + } + int forward_len = std::min(config_.group_max_len, qlen); + forward_many(forward_len, k, expert_ids, weights, input, output, backend); + forward(qlen - forward_len, k, expert_ids + forward_len * k, weights + forward_len * k, input + forward_len * config_.hidden_size * ggml_type_size(config_.hidden_type) / ggml_blck_size(config_.hidden_type), output + forward_len * config_.hidden_size * ggml_type_size(config_.hidden_type) / ggml_blck_size(config_.hidden_type), backend); +} \ No newline at end of file diff --git a/ktransformers/ktransformers_ext/operators/llamafile/moe.h b/ktransformers/ktransformers_ext/operators/llamafile/moe.h new file mode 100644 index 0000000..0d279fe --- /dev/null +++ b/ktransformers/ktransformers_ext/operators/llamafile/moe.h @@ -0,0 +1,96 @@ +/** + * @Description : + * @Author : chenht2022 + * @Date : 2024-07-22 02:03:22 + * @Version : 1.0.0 + * @LastEditors : chenht2022 + * @LastEditTime : 2024-07-25 10:35:10 + * @Copyright (c) 2024 by KVCache.AI, All Rights Reserved. 
+ **/
+#ifndef CPUINFER_OPERATOR_MOE_H
+#define CPUINFER_OPERATOR_MOE_H
+
+#include <cmath>
+#include <cstdint>
+#include <cstdio>
+#include <cstring>
+#include <vector>
+
+#include "../../cpu_backend/backend.h"
+#include "conversion.h"
+#include "llama.cpp/ggml-impl.h"
+#include "llama.cpp/ggml-quants.h"
+#include "llama.cpp/ggml.h"
+#include "llamafile/sgemm.h"
+
+struct MOEConfig {
+    int expert_num;
+    int routed_expert_num;
+    int hidden_size;
+    int intermediate_size;
+    int stride;
+    int group_min_len;
+    int group_max_len;
+    void* gate_proj;
+    void* up_proj;
+    void* down_proj;
+    ggml_type gate_type;
+    ggml_type up_type;
+    ggml_type down_type;
+    ggml_type hidden_type;
+
+    MOEConfig() {}
+
+    MOEConfig(int expert_num, int routed_expert_num, int hidden_size, int intermediate_size, int stride, int group_min_len, int group_max_len, void* gate_proj, void* up_proj, void* down_proj, ggml_type gate_type, ggml_type up_type, ggml_type down_type, ggml_type hidden_type)
+        : expert_num(expert_num), routed_expert_num(routed_expert_num), hidden_size(hidden_size), intermediate_size(intermediate_size), stride(stride), group_min_len(group_min_len), group_max_len(group_max_len), gate_proj(gate_proj), up_proj(up_proj), down_proj(down_proj), gate_type(gate_type), up_type(up_type), down_type(down_type), hidden_type(hidden_type) {}
+};
+
+class MOE {
+   public:
+    MOE(MOEConfig);
+    void warm_up(Backend* backend);
+    void forward_one(int k, const uint64_t* expert_ids, const float* weights, const void* input, void* output, Backend* backend);
+    void forward_many(int qlen, int k, const uint64_t* expert_ids, const float* weights, const void* input, void* output, Backend* backend);
+    void forward(int qlen, int k, const uint64_t* expert_ids, const float* weights, const void* input, void* output, Backend* backend);
+
+   private:
+    static void* buffer_;
+    MOEConfig config_;
+    void* gate_proj_;  // [expert_num * intermediate_size * hidden_size ( /32 if quantized)]
+    void* up_proj_;    // [expert_num * intermediate_size * hidden_size ( /32 if quantized)]
+    void* down_proj_;  // [expert_num * hidden_size * intermediate_size ( /32 if quantized)]
+
+    float* s_input_fp32_;    // [hidden_size]
+    uint8_t* s_gate_input_;  // [hidden_size * ggml_type_size(ggml_internal_get_type_traits(gate_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(gate_type).vec_dot_type)]
+    uint8_t* s_up_input_;    // [hidden_size * ggml_type_size(ggml_internal_get_type_traits(up_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(up_type).vec_dot_type)]
+    std::vector<float*> s_gate_output_;        // [routed_expert_num, intermediate_size]
+    std::vector<float*> s_up_output_;          // [routed_expert_num, intermediate_size]
+    std::vector<float*> s_intermediate_fp32_;  // [routed_expert_num, intermediate_size]
+    std::vector<uint8_t*> s_down_input_;       // [routed_expert_num, intermediate_size * ggml_type_size(ggml_internal_get_type_traits(down_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(down_type).vec_dot_type)]
+    std::vector<float*> s_down_output_;        // [routed_expert_num, hidden_size]
+    float* s_output_fp32_;                     // [hidden_size]
+
+    std::vector<float*> m_input_fp32_;    // [group_max_len, hidden_size]
+    std::vector<uint8_t*> m_gate_input_;  // [group_max_len, hidden_size * ggml_type_size(ggml_internal_get_type_traits(gate_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(gate_type).vec_dot_type)]
+    std::vector<uint8_t*> m_up_input_;    // [group_max_len, hidden_size * ggml_type_size(ggml_internal_get_type_traits(up_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(up_type).vec_dot_type)]
+    uint8_t* m_local_gate_input_;         // [routed_expert_num * group_max_len * hidden_size * ggml_type_size(ggml_internal_get_type_traits(gate_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(gate_type).vec_dot_type)]
+    uint8_t* m_local_up_input_;           // [routed_expert_num * group_max_len * hidden_size * ggml_type_size(ggml_internal_get_type_traits(up_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(up_type).vec_dot_type)]
+    float* m_local_gate_output_;          // [routed_expert_num * group_max_len * intermediate_size]
+    float* m_local_up_output_;            // [routed_expert_num * group_max_len * intermediate_size]
+    float* m_local_intermediate_fp32_;    // [routed_expert_num * group_max_len * intermediate_size]
+    uint8_t* m_local_down_input_;         // [routed_expert_num * group_max_len * intermediate_size * ggml_type_size(ggml_internal_get_type_traits(down_type).vec_dot_type) / ggml_blck_size(ggml_internal_get_type_traits(down_type).vec_dot_type)]
+    float* m_local_down_output_;          // [routed_expert_num * group_max_len * hidden_size]
+    std::vector<float*> m_output_fp32_;   // [group_max_len, hidden_size]
+
+    std::vector<std::vector<int>> m_local_pos_;          // [group_max_len, routed_expert_num]
+    std::vector<int> m_local_num_;                       // [expert_num]
+    std::vector<uint8_t*> m_local_gate_input_ptr_;       // [expert_num]
+    std::vector<uint8_t*> m_local_up_input_ptr_;         // [expert_num]
+    std::vector<float*> m_local_gate_output_ptr_;        // [expert_num]
+    std::vector<float*> m_local_up_output_ptr_;          // [expert_num]
+    std::vector<float*> m_local_intermediate_fp32_ptr_;  // [expert_num]
+    std::vector<uint8_t*> m_local_down_input_ptr_;       // [expert_num]
+    std::vector<float*> m_local_down_output_ptr_;        // [expert_num]
+};
+
+#endif
\ No newline at end of file
diff --git a/ktransformers/local_chat.py b/ktransformers/local_chat.py
new file mode 100644
index 0000000..59839be
--- /dev/null
+++ b/ktransformers/local_chat.py
@@ -0,0 +1,115 @@
+# Copyright 2024 Shaoyuan Chen
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
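+# Example invocation (hypothetical paths; fire.Fire below exposes the arguments
+# of local_chat() as CLI flags, so any supported checkpoint with matching GGUF
+# weights should work):
+#   python ktransformers/local_chat.py --model_path deepseek-ai/DeepSeek-V2-Chat \
+#       --gguf_path /path/to/gguf_dir --max_new_tokens 512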
+ +import os +import platform +import sys +project_dir = os.path.dirname(os.path.dirname(__file__)) +sys.path.insert(0, project_dir) +import torch +import logging +from transformers import ( + AutoTokenizer, + AutoConfig, + AutoModelForCausalLM, + GenerationConfig, + TextStreamer, +) +import json +import fire +from ktransformers.optimize.optimize import optimize_and_load_gguf +from ktransformers.models.modeling_deepseek import DeepseekV2ForCausalLM +from ktransformers.models.modeling_qwen2_moe import Qwen2MoeForCausalLM +from ktransformers.util.utils import prefill_and_generate +from ktransformers.server.config.config import Config + +custom_models = { + "DeepseekV2ForCausalLM": DeepseekV2ForCausalLM, + "Qwen2MoeForCausalLM": Qwen2MoeForCausalLM, +} + +ktransformer_rules_dir = os.path.dirname(os.path.abspath(__file__)) + "/optimize/optimize_rules/" +default_optimize_rules ={ + "DeepseekV2ForCausalLM": ktransformer_rules_dir + "DeepSeek-V2-Chat.yaml", + "Qwen2MoeForCausalLM": ktransformer_rules_dir + "Qwen2-57B-A14B-Instruct.yaml", +} + +def local_chat( + model_path: str, + optimize_rule_path: str = None, + gguf_path: str = None, + max_new_tokens: int = 1000, + cpu_infer: int = Config().cpu_infer +): + torch.set_grad_enabled(False) + + Config().cpu_infer = cpu_infer + tokenizer = AutoTokenizer.from_pretrained(model_path) + config = AutoConfig.from_pretrained(model_path, trust_remote_code=True) + torch.set_default_dtype(config.torch_dtype) + + with torch.device("meta"): + if config.architectures[0] in custom_models: + print("using custom modeling_xxx.py.") + if "Qwen2Moe" in config.architectures[0]: # Qwen2Moe must use flash_attention_2 to avoid overflow. + config._attn_implementation = "flash_attention_2" + model = custom_models[config.architectures[0]](config) + else: + model = AutoModelForCausalLM.from_config( + config, trust_remote_code=True, attn_implementation="flash_attention_2" + ) + + if optimize_rule_path is None: + if config.architectures[0] in default_optimize_rules: + print("using default_optimize_rule for", config.architectures[0]) + optimize_rule_path = default_optimize_rules[config.architectures[0]] + else: + optimize_rule_path = input( + "please input the path of your rule file(yaml file containing optimize rules):" + ) + + if gguf_path is None: + gguf_path = input( + "please input the path of your gguf file(gguf file in the dir containing input gguf file must all belong to current model):" + ) + optimize_and_load_gguf(model, optimize_rule_path, gguf_path, config) + + model.generation_config = GenerationConfig.from_pretrained(model_path) + if model.generation_config.pad_token_id is None: + model.generation_config.pad_token_id = model.generation_config.eos_token_id + model.eval() + + logging.basicConfig(level=logging.INFO) + + system = platform.system() + if (system == u'Windows'): + os.system('cls') + else: + os.system('clear') + + while True: + content = input("Chat: ") + # if content is num + if content == "": + content = "Please write a piece of quicksort code in C++." 
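+        # apply_chat_template (below) renders `messages` with the tokenizer's chat
+        # template; with return_tensors="pt" it returns the prompt token ids as a
+        # [1, seq_len] LongTensor, which prefill_and_generate consumes on the GPU.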
+
+        messages = [{"role": "user", "content": content}]
+        input_tensor = tokenizer.apply_chat_template(
+            messages, add_generation_prompt=True, return_tensors="pt"
+        )
+        torch.set_default_dtype(torch.bfloat16) # TODO: Remove this, replace dtype using config
+        generated = prefill_and_generate(model, tokenizer, input_tensor.cuda(), max_new_tokens)
+
+if __name__ == "__main__":
+    fire.Fire(local_chat)
diff --git a/ktransformers/models/__init__.py b/ktransformers/models/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/ktransformers/models/configuration_deepseek.py b/ktransformers/models/configuration_deepseek.py
new file mode 100644
index 0000000..5341fd9
--- /dev/null
+++ b/ktransformers/models/configuration_deepseek.py
@@ -0,0 +1,207 @@
+# Adapted from
+# https://huggingface.co/deepseek-ai/DeepSeek-V2-Chat-0628/blob/main/configuration_deepseek.py
+# Copyright 2023 DeepSeek-AI and The HuggingFace Inc. team. All rights reserved.
+# Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+DEEPSEEK_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+class DeepseekV2Config(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`DeepseekV2Model`]. It is used to instantiate a DeepSeek
+    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+    defaults will yield a similar configuration to that of the DeepSeek-V2.
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+    Args:
+        vocab_size (`int`, *optional*, defaults to 102400):
+            Vocabulary size of the DeepSeek model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`DeepseekV2Model`]
+        hidden_size (`int`, *optional*, defaults to 4096):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 11008):
+            Dimension of the MLP representations.
+        moe_intermediate_size (`int`, *optional*, defaults to 1407):
+            Dimension of the MoE representations.
+        num_hidden_layers (`int`, *optional*, defaults to 32):
+            Number of hidden layers in the Transformer decoder.
+        num_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer decoder.
+        n_shared_experts (`int`, *optional*, defaults to None):
+            Number of shared experts, None means dense model.
+        n_routed_experts (`int`, *optional*, defaults to None):
+            Number of routed experts, None means dense model.
+        routed_scaling_factor (`float`, *optional*, defaults to 1.0):
+            Scaling factor for routed experts.
+        topk_method (`str`, *optional*, defaults to `gready`):
+            Topk method used in routed gate.
+        n_group (`int`, *optional*, defaults to None):
+            Number of groups for routed experts.
+        topk_group (`int`, *optional*, defaults to None):
+            Number of selected groups for each token (for each token, ensuring the selected experts are only within `topk_group` groups).
+        num_experts_per_tok (`int`, *optional*, defaults to None):
+            Number of selected experts, None means dense model.
+        moe_layer_freq (`int`, *optional*, defaults to 1):
+            The frequency of the MoE layer: one expert layer for every `moe_layer_freq - 1` dense layers.
+        first_k_dense_replace (`int`, *optional*, defaults to 0):
+            Number of dense layers in shallow layers (embed->dense->dense->...->dense->moe->moe...->lm_head).
+                                                             \--k dense layers--/
+        norm_topk_prob (`bool`, *optional*, defaults to False):
+            Whether to normalize the weights of the routed experts.
+        scoring_func (`str`, *optional*, defaults to 'softmax'):
+            Method of computing expert weights.
+        aux_loss_alpha (`float`, *optional*, defaults to 0.001):
+            Auxiliary loss weight coefficient.
+        seq_aux (`bool`, *optional*, defaults to True):
+            Whether to compute the auxiliary loss for each individual sample.
+        num_key_value_heads (`int`, *optional*):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+            `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details check out [this
+            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+            `num_attention_heads`.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to 2048):
+            The maximum sequence length that this model might ever be used with.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        pad_token_id (`int`, *optional*):
+            Padding token id.
+        bos_token_id (`int`, *optional*, defaults to 1):
+            Beginning of stream token id.
+        eos_token_id (`int`, *optional*, defaults to 2):
+            End of stream token id.
+        pretraining_tp (`int`, *optional*, defaults to 1):
+            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+            document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
+            necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+            issue](https://github.com/pytorch/pytorch/issues/76232).
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether to tie weight embeddings
+        rope_theta (`float`, *optional*, defaults to 10000.0):
+            The base period of the RoPE embeddings.
+        rope_scaling (`Dict`, *optional*):
+            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+            `max_position_embeddings` to the expected new maximum.
+        attention_bias (`bool`, *optional*, defaults to `False`):
+            Whether to use a bias in the query, key, value and output projection layers during self-attention.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+ ```python + >>> from transformers import DeepseekV2Model, DeepseekV2Config + >>> # Initializing a Deepseek-V2 style configuration + >>> configuration = DeepseekV2Config() + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "deepseek_v2" + keys_to_ignore_at_inference = ["past_key_values"] + + def __init__( + self, + vocab_size=102400, + hidden_size=4096, + intermediate_size=11008, + moe_intermediate_size = 1407, + num_hidden_layers=30, + num_attention_heads=32, + num_key_value_heads=32, + n_shared_experts = None, + n_routed_experts = None, + ep_size = 1, + routed_scaling_factor = 1.0, + kv_lora_rank = 512, + q_lora_rank = 1536, + qk_rope_head_dim = 64, + v_head_dim = 128, + qk_nope_head_dim = 128, + topk_method = 'gready', + n_group = None, + topk_group = None, + num_experts_per_tok = None, + moe_layer_freq = 1, + first_k_dense_replace = 0, + norm_topk_prob = False, + scoring_func = 'softmax', + aux_loss_alpha = 0.001, + seq_aux = True, + hidden_act="silu", + max_position_embeddings=2048, + initializer_range=0.02, + rms_norm_eps=1e-6, + use_cache=True, + pad_token_id=None, + bos_token_id=100000, + eos_token_id=100001, + pretraining_tp=1, + tie_word_embeddings=False, + rope_theta=10000.0, + rope_scaling=None, + attention_bias=False, + attention_dropout=0.0, + cpu_quant=None, + **kwargs, + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.moe_intermediate_size = moe_intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.n_shared_experts = n_shared_experts + self.n_routed_experts = n_routed_experts + self.ep_size = ep_size + self.routed_scaling_factor = routed_scaling_factor + self.kv_lora_rank = kv_lora_rank + self.q_lora_rank = q_lora_rank + self.qk_rope_head_dim = qk_rope_head_dim + self.v_head_dim = v_head_dim + self.qk_nope_head_dim = qk_nope_head_dim + self.topk_method = topk_method + self.n_group = n_group + self.topk_group = topk_group + self.num_experts_per_tok = num_experts_per_tok + self.moe_layer_freq = moe_layer_freq + self.first_k_dense_replace = first_k_dense_replace + self.norm_topk_prob = norm_topk_prob + self.scoring_func = scoring_func + self.aux_loss_alpha = aux_loss_alpha + self.seq_aux = seq_aux + # for backward compatibility + if num_key_value_heads is None: + num_key_value_heads = num_attention_heads + + self.num_key_value_heads = num_key_value_heads + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.rms_norm_eps = rms_norm_eps + self.pretraining_tp = pretraining_tp + self.use_cache = use_cache + self.rope_theta = rope_theta + self.rope_scaling = rope_scaling + self.attention_bias = attention_bias + self.attention_dropout = attention_dropout + + self.cpu_quant = cpu_quant + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) diff --git a/ktransformers/models/custom_cache.py b/ktransformers/models/custom_cache.py new file mode 100644 index 0000000..385e6ec --- /dev/null +++ b/ktransformers/models/custom_cache.py @@ -0,0 +1,128 @@ +''' +Description : +Author : Boxin Zhang +Version : 0.1.0 +''' +# Adapted from +# https://github.com/huggingface/transformers/blob/v4.41.2/src/transformers/cache_utils.py +# Copyright 2018- The Hugging Face team. All rights reserved. 
+# Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
+import torch
+import transformers
+from transformers import Cache, PretrainedConfig
+from typing import List, Optional, Dict, Any, Tuple
+class StaticCache(transformers.StaticCache):
+    """
+    Static Cache class to be used with `torch.compile(model)`.
+
+    Parameters:
+        config (`PretrainedConfig`):
+            The configuration file defining the shape-related attributes required to initialize the static cache.
+        max_batch_size (`int`):
+            The maximum batch size with which the model will be used.
+        max_cache_len (`int`):
+            The maximum sequence length with which the model will be used.
+        device (`torch.device`):
+            The device on which the cache should be initialized. Should be the same as the layer.
+        dtype (*optional*, defaults to `torch.float32`):
+            The default `dtype` to use when initializing the layer.
+    """
+
+    def __init__(self, config: PretrainedConfig, max_batch_size: int, max_cache_len: int, device, dtype=None) -> None:
+        Cache.__init__(self)
+        self.max_batch_size = max_batch_size
+        self.max_cache_len = config.max_position_embeddings if max_cache_len is None else max_cache_len
+        # Some models define a custom `head_dim` != config.hidden_size // config.num_attention_heads
+        self.head_dim = (
+            config.head_dim if hasattr(config, "head_dim") else config.hidden_size // config.num_attention_heads
+        )
+
+        self.dtype = dtype if dtype is not None else torch.float32
+        self.num_key_value_heads = (
+            config.num_attention_heads if config.num_key_value_heads is None else config.num_key_value_heads
+        )
+
+        self.key_cache: List[torch.Tensor] = []
+        self.value_cache: List[torch.Tensor] = []
+        cache_shape = (max_batch_size, self.num_key_value_heads, self.max_cache_len, self.head_dim)
+        if config.architectures[0] == "DeepseekV2ForCausalLM":
+            # key_shape = (max_batch_size, self.num_key_value_heads, self.max_cache_len, config.qk_rope_head_dim + config.qk_nope_head_dim)
+            # value_shape = (max_batch_size, self.num_key_value_heads, self.max_cache_len, config.v_head_dim)
+            key_shape = (max_batch_size, 1, self.max_cache_len, config.qk_rope_head_dim)
+            value_shape = (max_batch_size, 1, self.max_cache_len, config.kv_lora_rank)
+        else:
+            key_shape = cache_shape
+            value_shape = cache_shape
+
+        self.past_tokens = []
+        self.num_hidden_layers = config.num_hidden_layers
+        for _ in range(self.num_hidden_layers):
+            # Note: `mark_static_address` is used to tag the cache as a fixed data pointer, preventing cuda graph
+            # breaks when updating the cache.
+            new_layer_key_cache = torch.zeros(key_shape, dtype=self.dtype, device=device)
+            new_layer_value_cache = torch.zeros(value_shape, dtype=self.dtype, device=device)
+            torch._dynamo.mark_static_address(new_layer_key_cache)
+            torch._dynamo.mark_static_address(new_layer_value_cache)
+            self.key_cache.append(new_layer_key_cache)
+            self.value_cache.append(new_layer_value_cache)
+            self.past_tokens.append(0)
+
+    def update(
+        self,
+        key_states: torch.Tensor,
+        value_states: torch.Tensor,
+        layer_idx: int,
+        cache_kwargs: Optional[Dict[str, Any]] = None,
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        """
+        Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
+        It is VERY important to index using a tensor, otherwise you introduce a copy to the device.
+
+        Parameters:
+            key_states (`torch.Tensor`):
+                The new key states to cache.
+            value_states (`torch.Tensor`):
+                The new value states to cache.
+            layer_idx (`int`):
+                The index of the layer to cache the states for.
+            cache_kwargs (`Dict[str, Any]`, `optional`):
+                Additional arguments for the cache subclass. The `StaticCache` needs the `cache_position` input
+                to know where to write in the cache.
+
+        Return:
+            A tuple containing the updated key and value states.
+        """
+        cache_position = cache_kwargs.get("cache_position")
+        k_out = self.key_cache[layer_idx]
+        v_out = self.value_cache[layer_idx]
+        #print(cache_position)
+        k_out[:, :, cache_position] = key_states
+        v_out[:, :, cache_position] = value_states
+        self.past_tokens[layer_idx] += cache_position.size(0)
+        return k_out, v_out
+
+    def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
+        """Returns the sequence length of the cached states that were seen by the model."""
+        # Occupied cache == any slot in the 3rd dim (sequence length) holds a non-zero value. To save on compute, let's
+        # limit the check to the first batch member and head dimension.
+        # TODO: deprecate this function in favor of `cache_position`
+        return self.past_tokens[layer_idx]
+
+    def change_seq_length(self, bias: Optional[int] = 0) -> None:
+        """Shifts the recorded sequence length of every layer by `bias` tokens."""
+        for layer_idx in range(self.num_hidden_layers):
+            self.past_tokens[layer_idx] += bias
+
+    def get_max_length(self) -> Optional[int]:
+        """Returns the maximum sequence length of the cached states."""
+        return self.max_cache_len
+
+    def reset(self):
+        """Resets the cache values while preserving the objects"""
+        for layer_idx in range(len(self.key_cache)):
+            # In-place ops prevent breaking the static address
+            self.key_cache[layer_idx].zero_()
+            self.value_cache[layer_idx].zero_()
diff --git a/ktransformers/models/modeling_deepseek.py b/ktransformers/models/modeling_deepseek.py
new file mode 100644
index 0000000..81fee86
--- /dev/null
+++ b/ktransformers/models/modeling_deepseek.py
@@ -0,0 +1,1990 @@
+# coding=utf-8
+'''
+Description :
+Author : Boxin Zhang
+Version : 0.1.0
+'''
+# Adapted from
+# https://huggingface.co/deepseek-ai/DeepSeek-V2-Chat-0628/blob/main/modeling_deepseek.py
+# Copyright 2023 DeepSeek-AI and The HuggingFace Inc. team. All rights reserved.
+# Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch DeepSeek model.""" +import math +import warnings +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from transformers.activations import ACT2FN +from transformers.cache_utils import Cache, DynamicCache, StaticCache +from transformers.modeling_attn_mask_utils import ( + AttentionMaskConverter, + _prepare_4d_attention_mask, + _prepare_4d_causal_attention_mask, +) +from transformers.modeling_outputs import ( + BaseModelOutputWithPast, + CausalLMOutputWithPast, + SequenceClassifierOutputWithPast, +) +from transformers.modeling_utils import PreTrainedModel +from transformers.pytorch_utils import ( + ALL_LAYERNORM_LAYERS, + is_torch_greater_or_equal_than_1_13, +) +from transformers.utils import ( + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_2_available, + is_flash_attn_greater_or_equal_2_10, + logging, + replace_return_docstrings, +) +from transformers.utils.import_utils import is_torch_fx_available +from .configuration_deepseek import DeepseekV2Config +import torch.distributed as dist +import numpy as np + +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func, flash_attn_with_kvcache + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + +# This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph. +# It means that the function will not be traced through and simply appear as a node in the graph. +if is_torch_fx_available(): + if not is_torch_greater_or_equal_than_1_13: + import torch.fx + + _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask) + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "DeepseekV2Config" + + +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad( + torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0) + ) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + +class DeepseekV2RMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + DeepseekV2RMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + return (self.weight * hidden_states).to(input_dtype) + + +ALL_LAYERNORM_LAYERS.append(DeepseekV2RMSNorm) + +# Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->DeepseekV2 +class DeepseekV2RotaryEmbedding(nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + super().__init__() + self.scaling_factor = scaling_factor + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + # For BC we register cos and sin cached + 
self.max_seq_len_cached = max_position_embeddings + + @torch.no_grad() + def forward(self, x, position_ids): + # x: [bs, num_attention_heads, seq_len, head_size] + inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) + position_ids_expanded = position_ids[:, None, :].float() + # Force float32 since bfloat16 loses precision on long contexts + # See https://github.com/huggingface/transformers/pull/29285 + device_type = x.device.type + device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu" + with torch.autocast(device_type=device_type, enabled=False): + freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) + emb = torch.cat((freqs, freqs), dim=-1) + cos = emb.cos() + sin = emb.sin() + return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) + +# Copied from transformers.models.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->DeepseekV2 +class DeepseekV2LinearScalingRotaryEmbedding(DeepseekV2RotaryEmbedding): + """DeepseekV2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev""" + + def __init__( + self, + dim, + max_position_embeddings=2048, + base=10000, + device=None, + scaling_factor=1.0, + ): + raise NotImplementedError("LinearScalingRotaryEmbedding is not supported now.") + self.scaling_factor = scaling_factor + super().__init__(dim, max_position_embeddings, base, device) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange( + self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype + ) + t = t / self.scaling_factor + + freqs = torch.outer(t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + +# Copied from transformers.models.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->DeepseekV2 +class DeepseekV2DynamicNTKScalingRotaryEmbedding(DeepseekV2RotaryEmbedding): + """DeepseekV2RotaryEmbedding extended with Dynamic NTK scaling. 
Credits to the Reddit users /u/bloc97 and /u/emozilla""" + + def __init__( + self, + dim, + max_position_embeddings=2048, + base=10000, + device=None, + scaling_factor=1.0, + ): + raise NotImplementedError("DynamicNTKScalingRotaryEmbedding is not supported now.") + self.scaling_factor = scaling_factor + super().__init__(dim, max_position_embeddings, base, device) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + + if seq_len > self.max_position_embeddings: + base = self.base * ( + (self.scaling_factor * seq_len / self.max_position_embeddings) + - (self.scaling_factor - 1) + ) ** (self.dim / (self.dim - 2)) + inv_freq = 1.0 / ( + base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim) + ) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + t = torch.arange( + self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype + ) + + freqs = torch.outer(t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + +# Inverse dim formula to find dim based on number of rotations +def yarn_find_correction_dim( + num_rotations, dim, base=10000, max_position_embeddings=2048 +): + return (dim * math.log(max_position_embeddings / (num_rotations * 2 * math.pi))) / ( + 2 * math.log(base) + ) + + +# Find dim range bounds based on rotations +def yarn_find_correction_range( + low_rot, high_rot, dim, base=10000, max_position_embeddings=2048 +): + low = math.floor( + yarn_find_correction_dim(low_rot, dim, base, max_position_embeddings) + ) + high = math.ceil( + yarn_find_correction_dim(high_rot, dim, base, max_position_embeddings) + ) + return max(low, 0), min(high, dim - 1) # Clamp values just in case + + +def yarn_get_mscale(scale=1, mscale=1): + if scale <= 1: + return 1.0 + return 0.1 * mscale * math.log(scale) + 1.0 + + +def yarn_linear_ramp_mask(min, max, dim): + if min == max: + max += 0.001 # Prevent singularity + + linear_func = (torch.arange(dim, dtype=torch.float32) - min) / (max - min) + ramp_func = torch.clamp(linear_func, 0, 1) + return ramp_func + +class DeepseekV2YarnRotaryEmbedding(DeepseekV2RotaryEmbedding): + def __init__( + self, + dim, + max_position_embeddings=2048, + base=10000, + device=None, + scaling_factor=1.0, + original_max_position_embeddings=4096, + beta_fast=32, + beta_slow=1, + mscale=1, + mscale_all_dim=0, + ): + nn.Module.__init__(self) + self.scaling_factor = scaling_factor + self.original_max_position_embeddings = original_max_position_embeddings + self.beta_fast = beta_fast + self.beta_slow = beta_slow + self.mscale = mscale + self.mscale_all_dim = mscale_all_dim + self.scaling_factor = scaling_factor + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + + freq_extra = 1.0 / ( + self.base + ** (torch.arange(0, dim, 2, dtype=torch.float32, device=device) / dim) + ) + freq_inter = 1.0 / ( + self.scaling_factor + * self.base + ** (torch.arange(0, dim, 2, dtype=torch.float32, device=device) / dim) + ) + + low, high = yarn_find_correction_range( + self.beta_fast, + self.beta_slow, + dim, + self.base, + self.original_max_position_embeddings, + ) + inv_freq_mask = 1.0 - yarn_linear_ramp_mask(low, high, dim // 2).to( + device=device, dtype=torch.float32 + ) + inv_freq = freq_inter * (1 - inv_freq_mask) + 
freq_extra * inv_freq_mask + self.register_buffer("inv_freq", inv_freq, persistent=False) + self._mscale = float( + yarn_get_mscale(self.scaling_factor, self.mscale) + / yarn_get_mscale(self.scaling_factor, self.mscale_all_dim) + ) + # For BC we register cos and sin cached + self.max_seq_len_cached = max_position_embeddings + + @torch.no_grad() + def forward(self, x, position_ids): + # x: [bs, num_attention_heads, seq_len, head_size] + inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) + position_ids_expanded = position_ids[:, None, :].float() + # Force float32 since bfloat16 loses precision on long contexts + # See https://github.com/huggingface/transformers/pull/29285 + device_type = x.device.type + device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu" + with torch.autocast(device_type=device_type, enabled=False): + freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) + emb = torch.cat((freqs, freqs), dim=-1) + cos = emb.cos()* self._mscale + sin = emb.sin()* self._mscale + return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) + +# Copied from transformers.models.llama.modeling_llama.rotate_half +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +# Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb +def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`): + Deprecated and unused. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
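+
+    Note: unlike the reference Llama implementation, this variant first reorders the
+    last dimension of q and k from interleaved pairs (x0, x1, x2, x3, ...) into the
+    half-split layout (x0, x2, ..., x1, x3, ...) via the view/transpose/reshape below,
+    presumably so that rotate_half lines up with how the checkpoint stores its rotary
+    dimensions.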
+ """ + cos = cos.unsqueeze(unsqueeze_dim) + sin = sin.unsqueeze(unsqueeze_dim) + b, h, s, d = q.shape + q = q.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d) + b, h, s, d = k.shape + k = k.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + +class DeepseekV2MLP(nn.Module): + def __init__(self, config, hidden_size=None, intermediate_size=None): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size if hidden_size is None else hidden_size + self.intermediate_size = ( + config.intermediate_size if intermediate_size is None else intermediate_size + ) + + self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) + self.act_fn = ACT2FN[config.hidden_act] + + def forward(self, x): + act = self.act_fn(self.gate_proj(x)) * self.up_proj(x) + down_proj = self.down_proj(act) + return down_proj + +class MoEGate(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.top_k = config.num_experts_per_tok + self.n_routed_experts = config.n_routed_experts + self.routed_scaling_factor = config.routed_scaling_factor + self.scoring_func = config.scoring_func + self.alpha = config.aux_loss_alpha + self.seq_aux = config.seq_aux + self.topk_method = config.topk_method + self.n_group = config.n_group + self.topk_group = config.topk_group + + # topk selection algorithm + self.norm_topk_prob = config.norm_topk_prob + self.gating_dim = config.hidden_size + self.weight = nn.Parameter( + torch.empty((self.n_routed_experts, self.gating_dim)) + ) + self.reset_parameters() + + def reset_parameters(self) -> None: + import torch.nn.init as init + + init.kaiming_uniform_(self.weight, a=math.sqrt(5)) + + def forward(self, hidden_states): + bsz, seq_len, h = hidden_states.shape + ### compute gating score + hidden_states = hidden_states.view(-1, h) + logits = F.linear( + hidden_states.type(torch.float32), self.weight.type(torch.float32), None + ) + if self.scoring_func == "softmax": + scores = logits.softmax(dim=-1, dtype=torch.float32) + else: + raise NotImplementedError( + f"insupportable scoring function for MoE gating: {self.scoring_func}" + ) + + ### select top-k experts + if self.topk_method == "greedy": + topk_weight, topk_idx = torch.topk( + scores, k=self.top_k, dim=-1, sorted=False + ) + elif self.topk_method == "group_limited_greedy": + group_scores = ( + scores.view(bsz * seq_len, self.n_group, -1).max(dim=-1).values + ) # [n, n_group] + group_idx = torch.topk( + group_scores, k=self.topk_group, dim=-1, sorted=False + )[ + 1 + ] # [n, top_k_group] + group_mask = torch.zeros_like(group_scores) # [n, n_group] + group_mask.scatter_(1, group_idx, 1) # [n, n_group] + score_mask = ( + group_mask.unsqueeze(-1) + .expand( + bsz * seq_len, self.n_group, self.n_routed_experts // self.n_group + ) + .reshape(bsz * seq_len, -1) + ) # [n, e] + tmp_scores = scores.masked_fill(~score_mask.bool(), 0.0) # [n, e] + topk_weight, topk_idx = torch.topk( + tmp_scores, k=self.top_k, dim=-1, sorted=False + ) + + ### norm gate to sum 1 + if self.top_k > 1 and self.norm_topk_prob: + denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20 + topk_weight = topk_weight / denominator + else: + topk_weight = topk_weight * self.routed_scaling_factor + ### 
expert-level computation auxiliary loss + if self.training and self.alpha > 0.0: + scores_for_aux = scores + aux_topk = self.top_k + # always compute aux loss based on the naive greedy topk method + topk_idx_for_aux_loss = topk_idx.view(bsz, -1) + if self.seq_aux: + scores_for_seq_aux = scores_for_aux.view(bsz, seq_len, -1) + ce = torch.zeros( + bsz, self.n_routed_experts, device=hidden_states.device + ) + ce.scatter_add_( + 1, + topk_idx_for_aux_loss, + torch.ones(bsz, seq_len * aux_topk, device=hidden_states.device), + ).div_(seq_len * aux_topk / self.n_routed_experts) + aux_loss = (ce * scores_for_seq_aux.mean(dim=1)).sum( + dim=1 + ).mean() * self.alpha + else: + mask_ce = F.one_hot( + topk_idx_for_aux_loss.view(-1), num_classes=self.n_routed_experts + ) + ce = mask_ce.float().mean(0) + Pi = scores_for_aux.mean(0) + fi = ce * self.n_routed_experts + aux_loss = (Pi * fi).sum() * self.alpha + else: + aux_loss = None + return topk_idx, topk_weight, aux_loss + + +class AddAuxiliaryLoss(torch.autograd.Function): + """ + The trick function of adding auxiliary (aux) loss, + which includes the gradient of the aux loss during backpropagation. + """ + + @staticmethod + def forward(ctx, x, loss): + assert loss.numel() == 1 + ctx.dtype = loss.dtype + ctx.required_aux_loss = loss.requires_grad + return x + + @staticmethod + def backward(ctx, grad_output): + grad_loss = None + if ctx.required_aux_loss: + grad_loss = torch.ones(1, dtype=ctx.dtype, device=grad_output.device) + return grad_output, grad_loss + +class DeepseekV2MoE(nn.Module): + """ + A mixed expert module containing shared experts. + """ + + def __init__(self, config): + super().__init__() + self.config = config + self.num_experts_per_tok = config.num_experts_per_tok + + if hasattr(config, "ep_size") and config.ep_size > 1: + assert config.ep_size == dist.get_world_size() + self.ep_size = config.ep_size + self.experts_per_rank = config.n_routed_experts // config.ep_size + self.ep_rank = dist.get_rank() + self.experts = nn.ModuleList( + [ + ( + DeepseekV2MLP( + config, intermediate_size=config.moe_intermediate_size + ) + if i >= self.ep_rank * self.experts_per_rank + and i < (self.ep_rank + 1) * self.experts_per_rank + else None + ) + for i in range(config.n_routed_experts) + ] + ) + else: + self.ep_size = 1 + self.experts_per_rank = config.n_routed_experts + self.ep_rank = 0 + self.experts = nn.ModuleList( + [ + DeepseekV2MLP(config, intermediate_size=config.moe_intermediate_size) + for i in range(config.n_routed_experts) + ] + ) + self.gate = MoEGate(config) + if config.n_shared_experts is not None: + intermediate_size = config.moe_intermediate_size * config.n_shared_experts + self.shared_experts = DeepseekV2MLP( + config=config, intermediate_size=intermediate_size + ) + + def forward(self, hidden_states): + identity = hidden_states + orig_shape = hidden_states.shape + topk_idx, topk_weight, aux_loss = self.gate(hidden_states) + hidden_states = hidden_states.view(-1, hidden_states.shape[-1]) + flat_topk_idx = topk_idx.view(-1) + if self.training: + hidden_states = hidden_states.repeat_interleave( + self.num_experts_per_tok, dim=0 + ) + y = torch.empty_like(hidden_states) + for i, expert in enumerate(self.experts): + y[flat_topk_idx == i] = expert(hidden_states[flat_topk_idx == i]) + y = (y.view(*topk_weight.shape, -1) * topk_weight.unsqueeze(-1)).sum(dim=1) + y = y.view(*orig_shape) + y = AddAuxiliaryLoss.apply(y, aux_loss) + else: + y = self.moe_infer(hidden_states, topk_idx, topk_weight).view(*orig_shape) + if 
self.config.n_shared_experts is not None: + y = y + self.shared_experts(identity) + return y + + @torch.no_grad() + def moe_infer(self, x, topk_ids, topk_weight): + cnts = topk_ids.new_zeros((topk_ids.shape[0], len(self.experts))) + cnts.scatter_(1, topk_ids, 1) + tokens_per_expert = cnts.sum(dim=0) + idxs = topk_ids.view(-1).argsort() + sorted_tokens = x[idxs // topk_ids.shape[1]] + sorted_tokens_shape = sorted_tokens.shape + if self.ep_size > 1: + tokens_per_ep_rank = tokens_per_expert.view(self.ep_size, -1).sum(dim=1) + tokens_per_expert_group = tokens_per_expert.new_empty( + tokens_per_expert.shape[0] + ) + dist.all_to_all_single(tokens_per_expert_group, tokens_per_expert) + output_splits = ( + tokens_per_expert_group.view(self.ep_size, -1) + .sum(1) + .cpu() + .numpy() + .tolist() + ) + gathered_tokens = sorted_tokens.new_empty( + tokens_per_expert_group.sum(dim=0).cpu().item(), sorted_tokens.shape[1] + ) + input_split_sizes = tokens_per_ep_rank.cpu().numpy().tolist() + dist.all_to_all( + list(gathered_tokens.split(output_splits)), + list(sorted_tokens.split(input_split_sizes)), + ) + tokens_per_expert_post_gather = tokens_per_expert_group.view( + self.ep_size, self.experts_per_rank + ).sum(dim=0) + gatherd_idxs = np.zeros(shape=(gathered_tokens.shape[0],), dtype=np.int32) + s = 0 + for i, k in enumerate(tokens_per_expert_group.cpu().numpy()): + gatherd_idxs[s : s + k] = i % self.experts_per_rank + s += k + gatherd_idxs = gatherd_idxs.argsort() + sorted_tokens = gathered_tokens[gatherd_idxs] + tokens_per_expert = tokens_per_expert_post_gather + tokens_per_expert = tokens_per_expert.cpu().numpy() + + outputs = [] + start_idx = 0 + for i, num_tokens in enumerate(tokens_per_expert): + end_idx = start_idx + num_tokens + if num_tokens == 0: + continue + expert = self.experts[i + self.ep_rank * self.experts_per_rank] + tokens_for_this_expert = sorted_tokens[start_idx:end_idx] + expert_out = expert(tokens_for_this_expert) + outputs.append(expert_out) + start_idx = end_idx + + outs = torch.cat(outputs, dim=0) if len(outputs) else sorted_tokens.new_empty(0) + if self.ep_size > 1: + new_x = torch.empty_like(outs) + new_x[gatherd_idxs] = outs + gathered_tokens = new_x.new_empty(*sorted_tokens_shape) + dist.all_to_all( + list(gathered_tokens.split(input_split_sizes)), + list(new_x.split(output_splits)), + ) + outs = gathered_tokens + + new_x = torch.empty_like(outs) + new_x[idxs] = outs + final_out = ( + new_x.view(*topk_ids.shape, -1) + .type(topk_weight.dtype) + .mul_(topk_weight.unsqueeze(dim=-1)) + .sum(dim=1) + .type(new_x.dtype) + ) + return final_out + +# Copied from transformers.models.llama.modeling_llama.repeat_kv +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + """ + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch,
+    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+    """
+    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+    if n_rep == 1:
+        return hidden_states
+    hidden_states = hidden_states[:, :, None, :, :].expand(
+        batch, num_key_value_heads, n_rep, slen, head_dim
+    )
+    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+# Copied from transformers.models.llama.modeling_llama.LlamaAttention with Llama->DeepseekV2
+class DeepseekV2Attention(nn.Module):
+    """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+    def __init__(self, config: DeepseekV2Config, layer_idx: Optional[int] = None):
+        super().__init__()
+        self.config = config
+        self.layer_idx = layer_idx
+        if layer_idx is None:
+            logger.warning_once(
+                f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
+                "lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
+                "when creating this class."
+            )
+
+        self.attention_dropout = config.attention_dropout
+        self.hidden_size = config.hidden_size
+        self.num_heads = config.num_attention_heads
+
+        self.max_position_embeddings = config.max_position_embeddings
+        self.rope_theta = config.rope_theta
+        self.q_lora_rank = config.q_lora_rank
+        self.qk_rope_head_dim = config.qk_rope_head_dim
+        self.kv_lora_rank = config.kv_lora_rank
+        self.v_head_dim = config.v_head_dim
+        self.qk_nope_head_dim = config.qk_nope_head_dim
+        self.q_head_dim = config.qk_nope_head_dim + config.qk_rope_head_dim
+
+        self.is_causal = True
+
+        if self.q_lora_rank is None:
+            self.q_proj = nn.Linear(
+                self.hidden_size, self.num_heads * self.q_head_dim, bias=False
+            )
+        else:
+            self.q_a_proj = nn.Linear(
+                self.hidden_size, config.q_lora_rank, bias=config.attention_bias
+            )
+            self.q_a_layernorm = DeepseekV2RMSNorm(config.q_lora_rank)
+            self.q_b_proj = nn.Linear(
+                config.q_lora_rank, self.num_heads * self.q_head_dim, bias=False
+            )
+
+        self.kv_a_proj_with_mqa = nn.Linear(
+            self.hidden_size,
+            config.kv_lora_rank + config.qk_rope_head_dim,
+            bias=config.attention_bias,
+        )
+        self.kv_a_layernorm = DeepseekV2RMSNorm(config.kv_lora_rank)
+        self.kv_b_proj = nn.Linear(
+            config.kv_lora_rank,
+            self.num_heads
+            * (self.q_head_dim - self.qk_rope_head_dim + self.v_head_dim),
+            bias=False,
+        )
+
+        self.o_proj = nn.Linear(
+            self.num_heads * self.v_head_dim,
+            self.hidden_size,
+            bias=config.attention_bias,
+        )
+        self._init_rope()
+
+        self.softmax_scale = self.q_head_dim ** (-0.5)
+        if self.config.rope_scaling is not None:
+            mscale_all_dim = self.config.rope_scaling.get("mscale_all_dim", 0)
+            scaling_factor = self.config.rope_scaling["factor"]
+            if mscale_all_dim:
+                mscale = yarn_get_mscale(scaling_factor, mscale_all_dim)
+                self.softmax_scale = self.softmax_scale * mscale * mscale
+
+    def _init_rope(self):
+        if self.config.rope_scaling is None:
+            self.rotary_emb = DeepseekV2RotaryEmbedding(
+                self.qk_rope_head_dim,
+                max_position_embeddings=self.max_position_embeddings,
+                base=self.rope_theta,
+            )
+        else:
+            scaling_type = self.config.rope_scaling["type"]
+            scaling_factor = self.config.rope_scaling["factor"]
+            if scaling_type == "linear":
+                self.rotary_emb = DeepseekV2LinearScalingRotaryEmbedding(
+                    self.qk_rope_head_dim,
+                    max_position_embeddings=self.max_position_embeddings,
+                    scaling_factor=scaling_factor,
+                    base=self.rope_theta,
+                )
+            elif scaling_type == "dynamic":
+                self.rotary_emb = 
DeepseekV2DynamicNTKScalingRotaryEmbedding(
+                    self.qk_rope_head_dim,
+                    max_position_embeddings=self.max_position_embeddings,
+                    scaling_factor=scaling_factor,
+                    base=self.rope_theta,
+                )
+            elif scaling_type == "yarn":
+                kwargs = {
+                    key: self.config.rope_scaling[key]
+                    for key in [
+                        "original_max_position_embeddings",
+                        "beta_fast",
+                        "beta_slow",
+                        "mscale",
+                        "mscale_all_dim",
+                    ]
+                    if key in self.config.rope_scaling
+                }
+                self.rotary_emb = DeepseekV2YarnRotaryEmbedding(
+                    self.qk_rope_head_dim,
+                    max_position_embeddings=self.max_position_embeddings,
+                    scaling_factor=scaling_factor,
+                    base=self.rope_theta,
+                    **kwargs,
+                )
+            else:
+                raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
+
+    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+        return (
+            tensor.view(bsz, seq_len, self.num_heads, self.v_head_dim)
+            .transpose(1, 2)
+            .contiguous()
+        )
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_value: Optional[Cache] = None,
+        output_attentions: bool = False,
+        use_cache: bool = False,
+        cache_position: Optional[torch.LongTensor] = None,
+        **kwargs,
+    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+        if "padding_mask" in kwargs:
+            warnings.warn(
+                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
+            )
+        bsz, q_len, _ = hidden_states.size()
+
+        if self.q_lora_rank is None:
+            q = self.q_proj(hidden_states)
+        else:
+            q = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states)))
+        q = q.view(bsz, q_len, self.num_heads, self.q_head_dim).transpose(1, 2)
+        q_nope, q_pe = torch.split(
+            q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1
+        )
+
+        compressed_kv = self.kv_a_proj_with_mqa(hidden_states)
+        compressed_kv, k_pe = torch.split(
+            compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1
+        )
+        k_pe = k_pe.view(bsz, q_len, 1, self.qk_rope_head_dim).transpose(1, 2)
+        kv = (
+            self.kv_b_proj(self.kv_a_layernorm(compressed_kv))
+            .view(bsz, q_len, self.num_heads, self.qk_nope_head_dim + self.v_head_dim)
+            .transpose(1, 2)
+        )
+
+        k_nope, value_states = torch.split(
+            kv, [self.qk_nope_head_dim, self.v_head_dim], dim=-1
+        )
+        kv_seq_len = value_states.shape[-2]
+        if past_key_value is not None:
+            if self.layer_idx is None:
+                raise ValueError(
+                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
+                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
+                    "with a layer index."
+ ) + kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + cos, sin = self.rotary_emb(q_pe, position_ids) + q_pe, k_pe = apply_rotary_pos_emb(q_pe, k_pe, cos, sin) + + query_states = k_pe.new_empty(bsz, self.num_heads, q_len, self.q_head_dim) + query_states[:, :, :, : self.qk_nope_head_dim] = q_nope + query_states[:, :, :, self.qk_nope_head_dim :] = q_pe + + key_states = k_pe.new_empty(bsz, self.num_heads, q_len, self.q_head_dim) + key_states[:, :, :, : self.qk_nope_head_dim] = k_nope + key_states[:, :, :, self.qk_nope_head_dim :] = k_pe + if past_key_value is not None: + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models + key_states, value_states = past_key_value.update( + key_states, value_states, self.layer_idx, cache_kwargs + ) + + attn_weights = ( + torch.matmul(query_states, key_states.transpose(2, 3)) * self.softmax_scale + ) + + if attention_mask is not None: + attn_weights = attn_weights + attention_mask + + # upcast attention to fp32 + attn_weights = nn.functional.softmax( + attn_weights, dim=-1, dtype=torch.float32 + ).to(query_states.dtype) + attn_weights = nn.functional.dropout( + attn_weights, p=self.attention_dropout, training=self.training + ) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.v_head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.v_head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + + attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.v_head_dim) + + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + +# Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2 with Llama->DeepseekV2 +class DeepseekV2FlashAttention2(DeepseekV2Attention): + """ + DeepseekV2 flash attention module. This module inherits from `DeepseekV2Attention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. + # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. + # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). 
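+        # In practice this flag means: flash_attn>=2.1 aligns the causal mask to the
+        # bottom-right of the (q_len, kv_len) score matrix, which is what KV-cache
+        # decoding needs; for older versions the mask is top-left aligned, so causal
+        # masking has to be skipped for single-token queries instead (handled in
+        # _flash_attention_forward below).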
+        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        attention_mask: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_value: Optional[Cache] = None,
+        output_attentions: bool = False,
+        use_cache: bool = False,
+        cache_position: Optional[torch.LongTensor] = None,
+        **kwargs,
+    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+        # DeepseekV2FlashAttention2 attention does not support output_attentions
+        if "padding_mask" in kwargs:
+            warnings.warn(
+                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
+            )
+
+            # overwrite attention_mask with padding_mask
+            attention_mask = kwargs.pop("padding_mask")
+
+        output_attentions = False
+
+        bsz, q_len, _ = hidden_states.size()
+
+        # Match the eager attention path above: fall back to the dense q_proj when
+        # q_lora_rank is disabled in the config (only q_b_proj exists otherwise).
+        if self.q_lora_rank is None:
+            q = self.q_proj(hidden_states)
+        else:
+            q = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states)))
+        q = q.view(bsz, q_len, self.num_heads, self.q_head_dim).transpose(1, 2)
+        q_nope, q_pe = torch.split(
+            q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1
+        )
+
+        # Flash attention requires the input to have the shape
+        # batch_size x seq_length x num_heads x head_dim
+        # therefore we just need to keep the original shape
+        compressed_kv = self.kv_a_proj_with_mqa(hidden_states)
+        compressed_kv, k_pe = torch.split(
+            compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1
+        )
+        k_pe = k_pe.view(bsz, q_len, 1, self.qk_rope_head_dim).transpose(1, 2)
+        kv = (
+            self.kv_b_proj(self.kv_a_layernorm(compressed_kv))
+            .view(bsz, q_len, self.num_heads, self.qk_nope_head_dim + self.v_head_dim)
+            .transpose(1, 2)
+        )
+
+        k_nope, value_states = torch.split(
+            kv, [self.qk_nope_head_dim, self.v_head_dim], dim=-1
+        )
+        kv_seq_len = value_states.shape[-2]
+        if past_key_value is not None:
+            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+
+        cos, sin = self.rotary_emb(q_pe, position_ids)
+        q_pe, k_pe = apply_rotary_pos_emb(q_pe, k_pe, cos, sin)
+
+        query_states = k_pe.new_empty(bsz, self.num_heads, q_len, self.q_head_dim)
+        query_states[:, :, :, : self.qk_nope_head_dim] = q_nope
+        query_states[:, :, :, self.qk_nope_head_dim :] = q_pe
+
+        key_states = k_pe.new_empty(bsz, self.num_heads, q_len, self.q_head_dim)
+        key_states[:, :, :, : self.qk_nope_head_dim] = k_nope
+        key_states[:, :, :, self.qk_nope_head_dim :] = k_pe
+
+        if self.q_head_dim != self.v_head_dim:
+            value_states = F.pad(value_states, [0, self.q_head_dim - self.v_head_dim])
+
+        if past_key_value is not None:
+            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}  # Specific to RoPE models
+            key_states, value_states = past_key_value.update(
+                key_states, value_states, self.layer_idx, cache_kwargs
+            )
+
+        # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
+        # to be able to avoid many of these transpose/reshape/view.
+        query_states = query_states.transpose(1, 2)
+        key_states = key_states.transpose(1, 2)
+        value_states = value_states.transpose(1, 2)
+
+        dropout_rate = self.attention_dropout if self.training else 0.0
+
+        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
+        # therefore the input hidden states get silently cast to float32. 
Hence, we need to
+        # cast them back to the correct dtype just to be sure everything works as expected.
+        # This might slow down training & inference, so it is recommended not to cast the LayerNorms
+        # to fp32. (DeepseekV2RMSNorm handles it correctly)
+
+        input_dtype = query_states.dtype
+        if input_dtype == torch.float32:
+            # Handle the case where the model is quantized
+            if hasattr(self.config, "_pre_quantization_dtype"):
+                target_dtype = self.config._pre_quantization_dtype
+            elif torch.is_autocast_enabled():
+                target_dtype = torch.get_autocast_gpu_dtype()
+            else:
+                target_dtype = self.q_a_proj.weight.dtype
+
+            logger.warning_once(
+                f"The input hidden states seem to have been silently cast to float32, this might be related to"
+                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
+                f" {target_dtype}."
+            )
+
+            query_states = query_states.to(target_dtype)
+            key_states = key_states.to(target_dtype)
+            value_states = value_states.to(target_dtype)
+
+        attn_output = self._flash_attention_forward(
+            query_states,
+            key_states,
+            value_states,
+            attention_mask,
+            q_len,
+            position_ids=position_ids,
+            dropout=dropout_rate,
+            softmax_scale=self.softmax_scale,
+        )
+        if self.q_head_dim != self.v_head_dim:
+            attn_output = attn_output[:, :, :, : self.v_head_dim]
+
+        attn_output = attn_output.reshape(
+            bsz, q_len, self.num_heads * self.v_head_dim
+        ).contiguous()
+        attn_output = self.o_proj(attn_output)
+
+        if not output_attentions:
+            attn_weights = None
+
+        return attn_output, attn_weights, past_key_value
+
+    def _flash_attention_forward(
+        self,
+        query_states,
+        key_states,
+        value_states,
+        attention_mask,
+        query_length,
+        position_ids,
+        dropout=0.0,
+        softmax_scale=None,
+    ):
+        """
+        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
+        first unpads the input, then computes the attention scores, and pads the final attention scores.
+        Args:
+            query_states (`torch.Tensor`):
+                Input query states to be passed to Flash Attention API
+            key_states (`torch.Tensor`):
+                Input key states to be passed to Flash Attention API
+            value_states (`torch.Tensor`):
+                Input value states to be passed to Flash Attention API
+            attention_mask (`torch.Tensor`):
+                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+                position of padding tokens and 1 for the position of non-padding tokens.
+            dropout (`float`, *optional*):
+                Attention dropout
+            softmax_scale (`float`, *optional*):
+                The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim)
+        """
+        if not self._flash_attn_uses_top_left_mask:
+            causal = self.is_causal
+        else:
+            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in DeepseekV2FlashAttention2 __init__. 
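+            # A q_len == 1 decode step attends to every cached key anyway, so the
+            # causal pattern is preserved even with `causal=False`; this only works
+            # around the top-left mask alignment of flash_attn<2.1.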
+ causal = self.is_causal and query_length != 1 + + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + ( + query_states, + key_states, + value_states, + indices_q, + cu_seq_lens, + max_seq_lens, + ) = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + + attn_output = pad_input( + attn_output_unpad, indices_q, batch_size, query_length + ) + else: + if query_length == 1: + position_ids = position_ids.to(dtype=torch.int32).squeeze(1) + attn_output = flash_attn_with_kvcache( + query_states, + key_states, + value_states, + cache_seqlens=position_ids, + softmax_scale=softmax_scale, + causal=causal, + ) + else: + attn_output = flash_attn_func( + query_states, + key_states, + value_states, + dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + + return attn_output + + def _upad_input( + self, query_layer, key_layer, value_layer, attention_mask, query_length + ): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape + + key_layer = index_first_axis( + key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), + indices_k, + ) + value_layer = index_first_axis( + value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), + indices_k, + ) + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), + indices_k, + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
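+            # Keep only the mask columns covering the new query tokens; with left
+            # padding these are always the last `query_length` positions, so the
+            # sliced mask stays aligned with query_layer before unpadding.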
+            attention_mask = attention_mask[:, -query_length:]
+            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(
+                query_layer, attention_mask
+            )
+
+        return (
+            query_layer,
+            key_layer,
+            value_layer,
+            indices_q,
+            (cu_seqlens_q, cu_seqlens_k),
+            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
+        )
+
+
+ATTENTION_CLASSES = {
+    "eager": DeepseekV2Attention,
+    "flash_attention_2": DeepseekV2FlashAttention2,
+}
+
+class DeepseekV2DecoderLayer(nn.Module):
+    def __init__(self, config: DeepseekV2Config, layer_idx: int):
+        super().__init__()
+        self.hidden_size = config.hidden_size
+
+        self.self_attn = ATTENTION_CLASSES[config._attn_implementation](
+            config=config, layer_idx=layer_idx
+        )
+
+        self.mlp = (
+            DeepseekV2MoE(config)
+            if (
+                config.n_routed_experts is not None
+                and layer_idx >= config.first_k_dense_replace
+                and layer_idx % config.moe_layer_freq == 0
+            )
+            else DeepseekV2MLP(config)
+        )
+        self.input_layernorm = DeepseekV2RMSNorm(
+            config.hidden_size, eps=config.rms_norm_eps
+        )
+        self.post_attention_layernorm = DeepseekV2RMSNorm(
+            config.hidden_size, eps=config.rms_norm_eps
+        )
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_value: Optional[Tuple[torch.Tensor]] = None,
+        output_attentions: Optional[bool] = False,
+        use_cache: Optional[bool] = False,
+        cache_position: Optional[torch.LongTensor] = None,
+        **kwargs,
+    ) -> Tuple[
+        torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
+    ]:
+        """
+        Args:
+            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+            attention_mask (`torch.FloatTensor`, *optional*):
+                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
+                query_sequence_length, key_sequence_length)` if default attention is used.
+            output_attentions (`bool`, *optional*):
+                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+                returned tensors for more detail.
+            use_cache (`bool`, *optional*):
+                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+                (see `past_key_values`).
+            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+        """
+        if "padding_mask" in kwargs:
+            warnings.warn(
+                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
+            )
+        residual = hidden_states
+        hidden_states = self.input_layernorm(hidden_states)
+        # Self Attention
+        hidden_states, self_attn_weights, present_key_value = self.self_attn(
+            hidden_states=hidden_states,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_value=past_key_value,
+            output_attentions=output_attentions,
+            use_cache=use_cache,
+            cache_position=cache_position,
+            **kwargs,
+        )
+        hidden_states = residual + hidden_states
+
+        # Fully Connected
+        residual = hidden_states
+        hidden_states = self.post_attention_layernorm(hidden_states)
+        hidden_states = self.mlp(hidden_states)
+        hidden_states = residual + hidden_states
+
+        outputs = (hidden_states,)
+
+        if output_attentions:
+            outputs += (self_attn_weights,)
+
+        if use_cache:
+            outputs += (present_key_value,)
+
+        return outputs
+
+
+DeepseekV2_START_DOCSTRING = r"""
+    This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+    etc.)
+
+    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+    and behavior.
+
+    Parameters:
+        config ([`DeepseekV2Config`]):
+            Model configuration class with all the parameters of the model. Initializing with a config file does not
+            load the weights associated with the model, only the configuration. Check out the
+            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+    "The bare DeepseekV2 Model outputting raw hidden-states without any specific head on top.",
+    DeepseekV2_START_DOCSTRING,
+)
+class DeepseekV2PreTrainedModel(PreTrainedModel):
+    config_class = DeepseekV2Config
+    base_model_prefix = "model"
+    supports_gradient_checkpointing = True
+    _no_split_modules = ["DeepseekV2DecoderLayer"]
+    _skip_keys_device_placement = "past_key_values"
+    _supports_flash_attn_2 = True
+    _supports_cache_class = True
+    _supports_static_cache = True
+
+    def _init_weights(self, module):
+        std = self.config.initializer_range
+        if isinstance(module, nn.Linear):
+            module.weight.data.normal_(mean=0.0, std=std)
+            if module.bias is not None:
+                module.bias.data.zero_()
+        elif isinstance(module, nn.Embedding):
+            module.weight.data.normal_(mean=0.0, std=std)
+            if module.padding_idx is not None:
+                module.weight.data[module.padding_idx].zero_()
+
+
+DeepseekV2_INPUTS_DOCSTRING = r"""
+    Args:
+        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+            it.
+
+            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+            [`PreTrainedTokenizer.__call__`] for details.
+
+            [What are input IDs?](../glossary#input-ids)
+        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+
+            [What are attention masks?](../glossary#attention-mask)
+
+            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+            [`PreTrainedTokenizer.__call__`] for details.
+
+            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
+            `past_key_values`).
+
+            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+            information on the default strategy.
+
+            - 1 indicates the head is **not masked**,
+            - 0 indicates the head is **masked**.
+        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
+            config.n_positions - 1]`.
+
+            [What are position IDs?](../glossary#position-ids)
+        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
+            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+            blocks) that can be used to speed up sequential decoding. 
This typically consists in the `past_key_values` + returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. + + Two formats are allowed: + - a [`~cache_utils.Cache`] instance; + - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy + cache format. + + The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the + legacy cache format will be returned. + + If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't + have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` + of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare DeepseekV2 Model outputting raw hidden-states without any specific head on top.", + DeepseekV2_START_DOCSTRING, +) +class DeepseekV2Model(DeepseekV2PreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`DeepseekV2DecoderLayer`]
+
+    Args:
+        config: DeepseekV2Config
+    """
+
+    def __init__(self, config: DeepseekV2Config):
+        super().__init__(config)
+        self.padding_idx = config.pad_token_id
+        self.vocab_size = config.vocab_size
+
+        self.embed_tokens = nn.Embedding(
+            config.vocab_size, config.hidden_size, self.padding_idx
+        )
+        self.layers = nn.ModuleList(
+            [
+                DeepseekV2DecoderLayer(config, layer_idx)
+                for layer_idx in range(config.num_hidden_layers)
+            ]
+        )
+        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
+        self.norm = DeepseekV2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+        self.gradient_checkpointing = False
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.embed_tokens
+
+    def set_input_embeddings(self, value):
+        self.embed_tokens = value
+
+    @add_start_docstrings_to_model_forward(DeepseekV2_INPUTS_DOCSTRING)
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        cache_position: Optional[torch.LongTensor] = None,
+    ) -> Union[Tuple, BaseModelOutputWithPast]:
+        output_attentions = (
+            output_attentions
+            if output_attentions is not None
+            else self.config.output_attentions
+        )
+        output_hidden_states = (
+            output_hidden_states
+            if output_hidden_states is not None
+            else self.config.output_hidden_states
+        )
+        use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+        return_dict = (
+            return_dict if return_dict is not None else self.config.use_return_dict
+        )
+
+        # retrieve input_ids and inputs_embeds
+        if input_ids is not None and inputs_embeds is not None:
+            raise ValueError(
+                "You cannot specify both input_ids and inputs_embeds at the same time"
+            )
+        elif input_ids is not None:
+            batch_size, seq_length = input_ids.shape[:2]
+        elif inputs_embeds is not None:
+            batch_size, seq_length = inputs_embeds.shape[:2]
+        else:
+            raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+        if self.gradient_checkpointing and self.training:
+            if use_cache:
+                logger.warning_once(
+                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+                )
+                use_cache = False
+
+        past_key_values_length = 0
+        if use_cache:
+            use_legacy_cache = not isinstance(past_key_values, Cache)
+            if use_legacy_cache:
+                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
+            past_key_values_length = past_key_values.get_usable_length(seq_length)
+
+        # Embed the tokens before cache_position is derived, since the branch below
+        # reads inputs_embeds.shape when only input_ids were passed in.
+        if inputs_embeds is None:
+            inputs_embeds = self.embed_tokens(input_ids)
+
+        if cache_position is None:
+            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+            cache_position = torch.arange(
+                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+            )
+
+        if position_ids is None:
+            position_ids = cache_position.unsqueeze(0)
+
+        causal_mask = self._update_causal_mask(
+            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
+        )
+
+        # embed positions
+        hidden_states = inputs_embeds
+
+        # decoder layers
+        all_hidden_states = () if output_hidden_states else None
+        all_self_attns = () if output_attentions else None
+        next_decoder_cache = None
+
+        for decoder_layer in self.layers:
+            if output_hidden_states:
+                all_hidden_states += (hidden_states,)
+
+            if self.gradient_checkpointing and self.training:
+                layer_outputs = self._gradient_checkpointing_func(
+                    decoder_layer.__call__,
+                    hidden_states,
+                    causal_mask,
+                    position_ids,
+                    past_key_values,
+                    output_attentions,
+                    use_cache,
+                    cache_position,
+                )
+            else:
+                layer_outputs = decoder_layer(
+                    hidden_states,
+                    attention_mask=causal_mask,
+                    position_ids=position_ids,
+                    past_key_value=past_key_values,
+                    output_attentions=output_attentions,
+                    use_cache=use_cache,
+                    cache_position=cache_position,
+                )
+
+            hidden_states = layer_outputs[0]
+
+            if use_cache:
+                next_decoder_cache = layer_outputs[2 if output_attentions else 1]
+
+            if output_attentions:
+                all_self_attns += (layer_outputs[1],)
+
+        hidden_states = self.norm(hidden_states)
+
+        # add hidden states from the last decoder layer
+        if output_hidden_states:
+            all_hidden_states += (hidden_states,)
+
+        next_cache = None
+        if use_cache:
+            next_cache = (
+                next_decoder_cache.to_legacy_cache()
+                if use_legacy_cache
+                else next_decoder_cache
+            )
+        if not return_dict:
+            return tuple(
+                v
+                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]
+                if v is not None
+            )
+        return BaseModelOutputWithPast(
+            last_hidden_state=hidden_states,
+            past_key_values=next_cache,
+            hidden_states=all_hidden_states,
+            attentions=all_self_attns,
+        )
+
+    # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask
+    def _update_causal_mask(
+        self,
+        attention_mask: torch.Tensor,
+        input_tensor: torch.Tensor,
+        cache_position: torch.Tensor,
+        past_key_values: Cache,
+        output_attentions: bool,
+    ):
+        # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
+        # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode step due to the dynamic shapes.
+        # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
+        # `fullgraph=True`. 
See more context in https://github.com/huggingface/transformers/pull/29114
+
+        if self.config._attn_implementation == "flash_attention_2":
+            if attention_mask is not None and 0.0 in attention_mask:
+                return attention_mask
+            return None
+
+        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
+        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
+        # to infer the attention mask.
+        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+        using_static_cache = isinstance(past_key_values, StaticCache)
+
+        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
+        if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
+            if AttentionMaskConverter._ignore_causal_mask_sdpa(
+                attention_mask,
+                inputs_embeds=input_tensor,
+                past_key_values_length=past_seen_tokens,
+                is_training=self.training,
+            ):
+                return None
+
+        dtype, device = input_tensor.dtype, input_tensor.device
+        min_dtype = torch.finfo(dtype).min
+        sequence_length = input_tensor.shape[1]
+        if using_static_cache:
+            target_length = past_key_values.get_max_length()
+        else:
+            target_length = (
+                attention_mask.shape[-1]
+                if isinstance(attention_mask, torch.Tensor)
+                else past_seen_tokens + sequence_length + 1
+            )
+
+        if attention_mask is not None and attention_mask.dim() == 4:
+            # in this case we assume that the mask comes already in inverted form and requires no inversion or slicing
+            if attention_mask.max() != 0:
+                raise ValueError("Custom 4D attention mask should be passed in inverted form with max==0")
+            causal_mask = attention_mask
+        else:
+            causal_mask = torch.full(
+                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
+            )
+            if sequence_length != 1:
+                causal_mask = torch.triu(causal_mask, diagonal=1)
+            causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
+            causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
+            if attention_mask is not None:
+                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
+                mask_length = attention_mask.shape[-1]
+                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
+                padding_mask = padding_mask == 0
+                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
+                    padding_mask, min_dtype
+                )
+        if (
+            self.config._attn_implementation == "sdpa"
+            and attention_mask is not None
+            and attention_mask.device.type == "cuda"
+            and not output_attentions
+        ):
+            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
+            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. 
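+            # Without this step, a row that is masked everywhere would produce all-(-inf)
+            # attention scores, and softmax would emit NaNs for that (padding) row.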
+            # Details: https://github.com/pytorch/pytorch/issues/110213
+            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
+
+        return causal_mask
+
+
+class DeepseekV2ForCausalLM(DeepseekV2PreTrainedModel):
+    _tied_weights_keys = ["lm_head.weight"]
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.model = DeepseekV2Model(config)
+        self.vocab_size = config.vocab_size
+        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.model.embed_tokens
+
+    def set_input_embeddings(self, value):
+        self.model.embed_tokens = value
+
+    def get_output_embeddings(self):
+        return self.lm_head
+
+    def set_output_embeddings(self, new_embeddings):
+        self.lm_head = new_embeddings
+
+    def set_decoder(self, decoder):
+        self.model = decoder
+
+    def get_decoder(self):
+        return self.model
+
+    @add_start_docstrings_to_model_forward(DeepseekV2_INPUTS_DOCSTRING)
+    @replace_return_docstrings(
+        output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
+    )
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        cache_position: Optional[torch.LongTensor] = None,
+    ) -> Union[Tuple, CausalLMOutputWithPast]:
+        r"""
+        Args:
+            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+        Returns:
+
+        Example:
+
+        ```python
+        >>> from transformers import AutoTokenizer, DeepseekV2ForCausalLM
+
+        >>> model = DeepseekV2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
+        >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
+
+        >>> prompt = "Hey, are you conscious? Can you talk to me?"
+        >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+        >>> # Generate
+        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+ ```""" + output_attentions = ( + output_attentions + if output_attentions is not None + else self.config.output_attentions + ) + output_hidden_states = ( + output_hidden_states + if output_hidden_states is not None + else self.config.output_hidden_states + ) + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + cache_position=cache_position, + ) + + hidden_states = outputs[0] + logits = self.lm_head(hidden_states) + logits = logits[:,-1,:].unsqueeze(0).float() + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, + input_ids, + past_key_values=None, + attention_mask=None, + inputs_embeds=None, + cache_position=None, + use_cache=True, + **kwargs, + ): + past_length = 0 + # Omit tokens covered by past_key_values + if past_key_values is not None: + if isinstance(past_key_values, Cache): + past_length = cache_position[0] if cache_position is not None else past_key_values.get_seq_length() + max_cache_length = ( + torch.tensor(past_key_values.get_max_length(), device=input_ids.device) + if past_key_values.get_max_length() is not None + else None + ) + cache_length = past_length if max_cache_length is None else torch.min(max_cache_length, past_length) + # TODO joao: remove this `else` after `generate` prioritizes `Cache` objects + else: + cache_length = past_length = past_key_values[0][0].shape[2] + max_cache_length = None + + # Keep only the unprocessed tokens: + # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where + # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as + # input) + if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]: + input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :] + # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard + # input_ids based on the past_length. + elif past_length < input_ids.shape[1]: + input_ids = input_ids[:, past_length:] + # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens. + + # If we are about to go beyond the maximum cache length, we need to crop the input attention mask. 
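+        # Cropping keeps the attention mask consistent with a bounded cache: once
+        # cache_length + new tokens would exceed max_cache_length, the oldest cache
+        # positions are evicted, so their mask columns must be dropped as well.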
+ if ( + max_cache_length is not None + and attention_mask is not None + and cache_length + input_ids.shape[1] > max_cache_length + ): + attention_mask = attention_mask[:, -max_cache_length:] + + position_ids = kwargs.get("position_ids", None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_length == 0: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1] + if cache_position is None: + cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device) + elif use_cache: + cache_position = cache_position[-input_length:] + + model_inputs.update( + { + "position_ids": position_ids, + "past_key_values": past_key_values, + "use_cache": use_cache, + "attention_mask": attention_mask, + "cache_position": cache_position, + } + ) + return model_inputs + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple( + past_state.index_select(0, beam_idx.to(past_state.device)) + for past_state in layer_past + ), + ) + return reordered_past + + +@add_start_docstrings( + """ + The DeepseekV2 Model transformer with a sequence classification head on top (linear layer). + + [`DeepseekV2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT-2) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). 
+ """, + DeepseekV2_START_DOCSTRING, +) +class DeepseekV2ForSequenceClassification(DeepseekV2PreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = DeepseekV2Model(config) + self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(DeepseekV2_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, transformers., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + transformer_outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError( + "Cannot handle batch sizes > 1 if no padding token is defined." 
+ ) + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + sequence_lengths = ( + torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 + ).to(logits.device) + else: + sequence_lengths = -1 + + pooled_logits = logits[ + torch.arange(batch_size, device=logits.device), sequence_lengths + ] + + loss = None + if labels is not None: + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and ( + labels.dtype == torch.long or labels.dtype == torch.int + ): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct( + pooled_logits.view(-1, self.num_labels), labels.view(-1) + ) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels) + if not return_dict: + output = (pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) diff --git a/ktransformers/models/modeling_qwen2_moe.py b/ktransformers/models/modeling_qwen2_moe.py new file mode 100644 index 0000000..4c66cef --- /dev/null +++ b/ktransformers/models/modeling_qwen2_moe.py @@ -0,0 +1,1765 @@ +# coding=utf-8 +''' +Description : +Author : Boxin Zhang +Version : 0.1.0 +''' +# Adapted from +# https://github.com/huggingface/transformers/blob/v4.42.3/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py +# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved. +# Copyright (c) 2024 by KVCache.AI, All Rights Reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""PyTorch Qwen2MoE model.""" + +import inspect +import math +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from transformers.activations import ACT2FN +from transformers.cache_utils import Cache, DynamicCache, StaticCache +from transformers.modeling_attn_mask_utils import ( + AttentionMaskConverter, +) +from transformers.modeling_outputs import ( + MoeCausalLMOutputWithPast, + MoeModelOutputWithPast, + SequenceClassifierOutputWithPast, + TokenClassifierOutput, +) +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import ( + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_2_available, + is_flash_attn_greater_or_equal_2_10, + logging, + replace_return_docstrings, +) +from transformers.models.qwen2_moe.configuration_qwen2_moe import Qwen2MoeConfig + + +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func, flash_attn_with_kvcache + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters) + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "Qwen/Qwen1.5-MoE-A2.7B" +_CONFIG_FOR_DOC = "Qwen2MoeConfig" + + +# Copied from transformers.models.mixtral.modeling_mixtral.load_balancing_loss_func +def load_balancing_loss_func( + gate_logits: torch.Tensor, num_experts: torch.Tensor = None, top_k=2, attention_mask: Optional[torch.Tensor] = None +) -> float: + r""" + Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch. + + See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss + function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between + experts is too unbalanced. + + Args: + gate_logits (Union[`torch.Tensor`, Tuple[torch.Tensor]): + Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of + shape [batch_size X sequence_length, num_experts]. + attention_mask (`torch.Tensor`, None): + The attention_mask used in forward function + shape [batch_size X sequence_length] if not None. + num_experts (`int`, *optional*): + Number of experts + + Returns: + The auxiliary loss. 
+ """ + if gate_logits is None or not isinstance(gate_logits, tuple): + return 0 + + if isinstance(gate_logits, tuple): + compute_device = gate_logits[0].device + concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0) + + routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1) + + _, selected_experts = torch.topk(routing_weights, top_k, dim=-1) + + expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts) + + if attention_mask is None: + # Compute the percentage of tokens routed to each experts + tokens_per_expert = torch.mean(expert_mask.float(), dim=0) + + # Compute the average probability of routing to these experts + router_prob_per_expert = torch.mean(routing_weights, dim=0) + else: + batch_size, sequence_length = attention_mask.shape + num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length) + + # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask + expert_attention_mask = ( + attention_mask[None, :, :, None, None] + .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts)) + .reshape(-1, top_k, num_experts) + .to(compute_device) + ) + + # Compute the percentage of tokens routed to each experts + tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum( + expert_attention_mask, dim=0 + ) + + # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert + router_per_expert_attention_mask = ( + attention_mask[None, :, :, None] + .expand((num_hidden_layers, batch_size, sequence_length, num_experts)) + .reshape(-1, num_experts) + .to(compute_device) + ) + + # Compute the average probability of routing to these experts + router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum( + router_per_expert_attention_mask, dim=0 + ) + + overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0)) + return overall_loss * num_experts + + +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + +# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Qwen2Moe +class Qwen2MoeRMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + Qwen2MoeRMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + return self.weight * hidden_states.to(input_dtype) + +# Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->Qwen2Moe +class Qwen2MoeRotaryEmbedding(nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + super().__init__() + self.scaling_factor = scaling_factor + self.dim = dim + self.max_position_embeddings = 
max_position_embeddings + self.base = base + inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + # For BC we register cos and sin cached + self.max_seq_len_cached = max_position_embeddings + + @torch.no_grad() + def forward(self, x, position_ids): + # x: [bs, num_attention_heads, seq_len, head_size] + inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) + position_ids_expanded = position_ids[:, None, :].float() + # Force float32 since bfloat16 loses precision on long contexts + # See https://github.com/huggingface/transformers/pull/29285 + device_type = x.device.type + device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu" + with torch.autocast(device_type=device_type, enabled=False): + freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) + emb = torch.cat((freqs, freqs), dim=-1) + cos = emb.cos() + sin = emb.sin() + return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) + + +# Copied from transformers.models.llama.modeling_llama.rotate_half +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +# Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb +def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`): + Deprecated and unused. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
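+
+    Example (a shape-only sketch; the sizes are hypothetical):
+
+    ```python
+    >>> q = torch.randn(1, 8, 16, 64)  # [batch_size, heads, seq_len, head_dim]
+    >>> k = torch.randn(1, 2, 16, 64)  # fewer key/value heads (GQA)
+    >>> cos = torch.randn(1, 16, 64)   # [batch_size, seq_len, head_dim]
+    >>> sin = torch.randn(1, 16, 64)
+    >>> q_embed, k_embed = apply_rotary_pos_emb(q, k, cos, sin)
+    >>> q_embed.shape, k_embed.shape
+    (torch.Size([1, 8, 16, 64]), torch.Size([1, 2, 16, 64]))
+    ```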
+ """ + cos = cos.unsqueeze(unsqueeze_dim) + sin = sin.unsqueeze(unsqueeze_dim) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +# Modified from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Qwen2Moe +class Qwen2MoeMLP(nn.Module): + def __init__(self, config, intermediate_size=None): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.intermediate_size = intermediate_size + self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) + self.act_fn = ACT2FN[config.hidden_act] + + def forward(self, x): + return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + + +# Copied from transformers.models.llama.modeling_llama.repeat_kv +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + """ + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) + """ + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep == 1: + return hidden_states + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + +# Copied from transformers.models.qwen2.modeling_qwen2.Qwen2Attention with Qwen2->Qwen2Moe +class Qwen2MoeAttention(nn.Module): + """ + Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer + and "Generating Long Sequences with Sparse Transformers". + """ + + def __init__(self, config: Qwen2MoeConfig, layer_idx: Optional[int] = None): + super().__init__() + self.config = config + self.layer_idx = layer_idx + if layer_idx is None: + logger.warning_once( + f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will " + "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` " + "when creating this class." + ) + + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.max_position_embeddings = config.max_position_embeddings + self.rope_theta = config.rope_theta + self.is_causal = True + self.attention_dropout = config.attention_dropout + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." 
+ ) + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True) + self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True) + self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True) + self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) + + self.rotary_emb = Qwen2MoeRotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, + ) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + if self.layer_idx is None: + raise ValueError( + f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} " + "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " + "with a layer index." + ) + kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + cos, sin = self.rotary_emb(value_states, position_ids) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + if past_key_value is not None: + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + # repeat k/v heads if n_kv_heads < n_heads + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: # no matter the length, we just slice it + causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] + attn_weights = attn_weights + causal_mask + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = 
self.o_proj(attn_output)
+
+        if not output_attentions:
+            attn_weights = None
+
+        return attn_output, attn_weights, past_key_value
+
+
+# Copied from transformers.models.qwen2.modeling_qwen2.Qwen2FlashAttention2 with Qwen2->Qwen2Moe
+class Qwen2MoeFlashAttention2(Qwen2MoeAttention):
+    """
+    Qwen2Moe flash attention module, following the Qwen2Moe attention module. This module inherits from
+    `Qwen2MoeAttention`, as the weights of the module stay untouched. The only required change is in the forward pass,
+    where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input
+    contains any. Additionally, for sliding window attention, we apply SWA only to the bottom
+    config.max_window_layers layers.
+    """
+
+    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
+        # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
+        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
+        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_value: Optional[Cache] = None,
+        output_attentions: bool = False,
+        use_cache: bool = False,
+        cache_position: Optional[torch.LongTensor] = None,
+    ):
+        bsz, q_len, _ = hidden_states.size()
+
+        query_states = self.q_proj(hidden_states)
+        key_states = self.k_proj(hidden_states)
+        value_states = self.v_proj(hidden_states)
+
+        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+        kv_seq_len = key_states.shape[-2]
+        if past_key_value is not None:
+            if self.layer_idx is None:
+                raise ValueError(
+                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
+                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
+                    "with a layer index."
+                )
+            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+
+        cos, sin = self.rotary_emb(value_states, position_ids)
+        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+        use_sliding_windows = (
+            _flash_supports_window_size
+            and getattr(self.config, "sliding_window", None) is not None
+            and kv_seq_len > self.config.sliding_window
+            and self.config.use_sliding_window
+        )
+
+        if not _flash_supports_window_size:
+            logger.warning_once(
+                "The current flash attention version does not support sliding window attention; for a more memory-efficient"
+                " implementation, make sure to upgrade the flash-attn library."
+            )
+
+        if past_key_value is not None:
+            # Activate slicing cache only if the config has a `sliding_window` attribute
+            cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
+            if (
+                getattr(self.config, "sliding_window", None) is not None
+                and kv_seq_len > self.config.sliding_window
+                and cache_has_contents
+            ):
+                slicing_tokens = 1 - self.config.sliding_window
+
+                past_key = past_key_value[self.layer_idx][0]
+                past_value = past_key_value[self.layer_idx][1]
+
+                past_key = past_key[:, :, slicing_tokens:, :].contiguous()
+                past_value = past_value[:, :, slicing_tokens:, :].contiguous()
+
+                if past_key.shape[-2] != self.config.sliding_window - 1:
+                    raise ValueError(
+                        f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
+                        f" {past_key.shape}"
+                    )
+
+                if attention_mask is not None:
+                    attention_mask = attention_mask[:, slicing_tokens:]
+                    attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
+
+            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}  # Specific to RoPE models
+            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+            # We slice the states so that the static KV cache is supported in FA2 (not sure it is a must, as compile fails).
+            # For bsz == 1, avoid slicing so that the CUDA graph can be captured.
+            if cache_position is not None and q_len > 1:
+                key_states = key_states[:, :, : cache_position[-1] + 1, :]
+                value_states = value_states[:, :, : cache_position[-1] + 1, :]
+
+        # repeat k/v heads if n_kv_heads < n_heads
+        key_states = repeat_kv(key_states, self.num_key_value_groups)
+        value_states = repeat_kv(value_states, self.num_key_value_groups)
+        dropout_rate = 0.0 if not self.training else self.attention_dropout
+
+        # In PEFT, we usually cast the layer norms to float32 for training stability reasons;
+        # therefore the input hidden states get silently cast to float32. Hence, we need
+        # to cast them back to float16 just to be sure everything works as expected.
+        input_dtype = query_states.dtype
+        if input_dtype == torch.float32:
+            if torch.is_autocast_enabled():
+                target_dtype = torch.get_autocast_gpu_dtype()
+            # Handle the case where the model is quantized
+            elif hasattr(self.config, "_pre_quantization_dtype"):
+                target_dtype = self.config._pre_quantization_dtype
+            else:
+                target_dtype = self.q_proj.weight.dtype
+
+            logger.warning_once(
+                f"The input hidden states seem to be silently cast to float32; this might be related to"
+                f" the fact that you have upcast embedding or layer norm layers to float32. We will cast back the input to"
+                f" {target_dtype}."
+            )
+
+            query_states = query_states.to(target_dtype)
+            key_states = key_states.to(target_dtype)
+            value_states = value_states.to(target_dtype)
+
+        # Reshape to the expected shape for Flash Attention
+        query_states = query_states.transpose(1, 2)
+        key_states = key_states.transpose(1, 2)
+        value_states = value_states.transpose(1, 2)
+
+        attn_output = self._flash_attention_forward(
+            query_states,
+            key_states,
+            value_states,
+            attention_mask,
+            q_len,
+            position_ids=position_ids,
+            dropout=dropout_rate,
+            use_sliding_windows=use_sliding_windows,
+        )
+
+        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
+        attn_output = self.o_proj(attn_output)
+
+        if not output_attentions:
+            attn_weights = None
+
+        return attn_output, attn_weights, past_key_value
+
+    def _flash_attention_forward(
+        self,
+        query_states,
+        key_states,
+        value_states,
+        attention_mask,
+        query_length,
+        position_ids,
+        dropout=0.0,
+        softmax_scale=None,
+        use_sliding_windows=False,
+    ):
+        """
+        Calls the forward method of Flash Attention. If the input hidden states contain at least one padding token,
+        it first unpads the input, then computes the attention scores, and finally pads the final attention scores.
+
+        Args:
+            query_states (`torch.Tensor`):
+                Input query states to be passed to Flash Attention API
+            key_states (`torch.Tensor`):
+                Input key states to be passed to Flash Attention API
+            value_states (`torch.Tensor`):
+                Input value states to be passed to Flash Attention API
+            attention_mask (`torch.Tensor`):
+                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+                position of padding tokens and 1 for the position of non-padding tokens.
+            dropout (`float`):
+                Attention dropout
+            softmax_scale (`float`, *optional*):
+                The scaling of QK^T before applying softmax. Defaults to `1 / sqrt(head_dim)`.
+            use_sliding_windows (`bool`, *optional*):
+                Whether to activate sliding window attention.
+        """
+        if not self._flash_attn_uses_top_left_mask:
+            causal = self.is_causal
+        else:
+            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
+            causal = self.is_causal and query_length != 1
+
+        # Decide whether to use SWA or not by layer index.
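+        # Illustrative example (hypothetical value): with max_window_layers=28, layers
+        # 0..27 keep the sliding window while layers 28 and above fall back to full
+        # attention, i.e. SWA is applied only to the bottom of the layer stack.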
+ if use_sliding_windows and self.layer_idx >= self.config.max_window_layers: + use_sliding_windows = False + + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + if not use_sliding_windows: + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + else: + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=causal, + window_size=(self.config.sliding_window, self.config.sliding_window), + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + if not use_sliding_windows: + if query_length == 1: + position_ids = position_ids.to(dtype=torch.int32).squeeze(1) + attn_output = flash_attn_with_kvcache( + query_states, + key_states, + value_states, + cache_seqlens=position_ids, + softmax_scale=softmax_scale, + causal=causal, + ) + else: + attn_output = flash_attn_func( + query_states, + key_states, + value_states, + dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + else: + attn_output = flash_attn_func( + query_states, + key_states, + value_states, + dropout, + softmax_scale=softmax_scale, + causal=causal, + window_size=(self.config.sliding_window, self.config.sliding_window), + ) + + return attn_output + + # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape + + # On the first iteration we need to properly re-create the padding mask + # by slicing it on the proper place + if kv_seq_len != attention_mask.shape[-1]: + attention_mask_num_tokens = attention_mask.shape[-1] + attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :] + + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + + key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) + value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) + + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
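+            # Illustrative example (hypothetical values): with left padding,
+            # attention_mask = [[0, 0, 1, 1, 1]] and query_length = 3 keep exactly the
+            # three non-padding positions; right-padded inputs would violate this.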
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +# Copied from transformers.models.mixtral.modeling_mixtral.MixtralSdpaAttention with Mixtral->Qwen2Moe +class Qwen2MoeSdpaAttention(Qwen2MoeAttention): + """ + Qwen2Moe attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from + `Qwen2MoeAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to + SDPA API. + """ + + # Adapted from Qwen2MoeAttention.forward + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if output_attentions: + # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. + logger.warning_once( + "Qwen2MoeModel is using Qwen2MoeSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " + 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' + ) + return super().forward( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + cos, sin = self.rotary_emb(value_states, position_ids) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + if past_key_value is not None: + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + causal_mask = attention_mask + if attention_mask is not None: # no matter the length, we just slice it + causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] + + # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, + # Reference: https://github.com/pytorch/pytorch/issues/112577. 
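+        # Note: the .contiguous() calls below are a defensive copy for that kernel
+        # issue; .contiguous() returns the tensor unchanged if it is already contiguous.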
+ if query_states.device.type == "cuda" and attention_mask is not None: + query_states = query_states.contiguous() + key_states = key_states.contiguous() + value_states = value_states.contiguous() + + # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment + # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling. + # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1. + is_causal = True if causal_mask is None and q_len > 1 else False + + attn_output = torch.nn.functional.scaled_dot_product_attention( + query_states, + key_states, + value_states, + attn_mask=causal_mask, + dropout_p=self.attention_dropout if self.training else 0.0, + is_causal=is_causal, + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.view(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + return attn_output, None, past_key_value + + +QWEN2MOE_ATTENTION_CLASSES = { + "eager": Qwen2MoeAttention, + "flash_attention_2": Qwen2MoeFlashAttention2, + "sdpa": Qwen2MoeSdpaAttention, +} + + +class Qwen2MoeSparseMoeBlock(nn.Module): + def __init__(self, config): + super().__init__() + self.num_experts = config.num_experts + self.top_k = config.num_experts_per_tok + self.norm_topk_prob = config.norm_topk_prob + + # gating + self.gate = nn.Linear(config.hidden_size, config.num_experts, bias=False) + self.experts = nn.ModuleList( + [Qwen2MoeMLP(config, intermediate_size=config.moe_intermediate_size) for _ in range(self.num_experts)] + ) + + self.shared_expert = Qwen2MoeMLP(config, intermediate_size=config.shared_expert_intermediate_size) + self.shared_expert_gate = torch.nn.Linear(config.hidden_size, 1, bias=False) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + """ """ + batch_size, sequence_length, hidden_dim = hidden_states.shape + hidden_states = hidden_states.view(-1, hidden_dim) + # router_logits: (batch * sequence_length, n_experts) + router_logits = self.gate(hidden_states) + + routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float) + routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1) + if self.norm_topk_prob: + routing_weights /= routing_weights.sum(dim=-1, keepdim=True) + # we cast back to the input dtype + routing_weights = routing_weights.to(hidden_states.dtype) + + final_hidden_states = torch.zeros( + (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device + ) + + # One hot encode the selected experts to create an expert mask + # this will be used to easily index which expert is going to be sollicitated + expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0) + + # Loop over all available experts in the model and perform the computation on each expert + for expert_idx in range(self.num_experts): + expert_layer = self.experts[expert_idx] + idx, top_x = torch.where(expert_mask[expert_idx]) + + # Index the correct hidden states and compute the expert hidden state for + # the current expert. 
We need to make sure to multiply the output hidden + # states by `routing_weights` on the corresponding tokens (top-1 and top-2) + current_state = hidden_states[None, top_x].reshape(-1, hidden_dim) + current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None] + + # However `index_add_` only support torch tensors for indexing so we'll use + # the `top_x` tensor here. + final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype)) + + shared_expert_output = self.shared_expert(hidden_states) + shared_expert_output = F.sigmoid(self.shared_expert_gate(hidden_states)) * shared_expert_output + + final_hidden_states = final_hidden_states + shared_expert_output + + final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim) + return final_hidden_states, router_logits + + +class Qwen2MoeDecoderLayer(nn.Module): + def __init__(self, config: Qwen2MoeConfig, layer_idx: int): + super().__init__() + self.hidden_size = config.hidden_size + + self.self_attn = QWEN2MOE_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx) + + if (layer_idx not in config.mlp_only_layers) and ( + config.num_experts > 0 and (layer_idx + 1) % config.decoder_sparse_step == 0 + ): + self.mlp = Qwen2MoeSparseMoeBlock(config) + else: + self.mlp = Qwen2MoeMLP(config, intermediate_size=config.intermediate_size) + + self.input_layernorm = Qwen2MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.post_attention_layernorm = Qwen2MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + output_router_logits: Optional[bool] = False, + use_cache: Optional[bool] = False, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, sequence_length)` where padding elements are indicated by 0. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_router_logits (`bool`, *optional*): + Whether or not to return the logits of all the routers. They are useful for computing the router loss, + and should not be returned during inference. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): + Indices depicting the position of the input sequence tokens in the sequence. 
+ kwargs (`dict`, *optional*): + Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code + into the model + """ + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + + hidden_states = self.mlp(hidden_states) + if isinstance(hidden_states, tuple): + hidden_states, router_logits = hidden_states + else: + router_logits = None + + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + if output_router_logits: + outputs += (router_logits,) + + return outputs + + +QWEN2MOE_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`Qwen2MoeConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +@add_start_docstrings( + "The bare Qwen2MoE Model outputting raw hidden-states without any specific head on top.", + QWEN2MOE_START_DOCSTRING, +) +class Qwen2MoePreTrainedModel(PreTrainedModel): + config_class = Qwen2MoeConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["Qwen2MoeDecoderLayer"] + _skip_keys_device_placement = "past_key_values" + _supports_flash_attn_2 = True + _supports_sdpa = True + _supports_cache_class = True + _supports_static_cache = True + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +QWEN2MOE_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): + Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` + returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. + + Two formats are allowed: + - a [`~cache_utils.Cache`] instance; + - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy + cache format. + + The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the + legacy cache format will be returned. + + If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't + have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` + of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + output_router_logits (`bool`, *optional*): + Whether or not to return the logits of all the routers. They are useful for computing the router loss, and + should not be returned during inference. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): + Indices depicting the position of the input sequence tokens in the sequence. 
Contrarily to `position_ids`, + this tensor is not affected by padding. It is used to update the cache in the correct position and to infer + the complete sequence length. +""" + + +@add_start_docstrings( + "The bare Qwen2MoE Model outputting raw hidden-states without any specific head on top.", + QWEN2MOE_START_DOCSTRING, +) +class Qwen2MoeModel(Qwen2MoePreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2MoeDecoderLayer`] + + Args: + config: Qwen2MoeConfig + """ + + def __init__(self, config: Qwen2MoeConfig): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.layers = nn.ModuleList( + [Qwen2MoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] + ) + self._attn_implementation = config._attn_implementation + self.norm = Qwen2MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + @add_start_docstrings_to_model_forward(QWEN2MOE_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_router_logits: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + ) -> Union[Tuple, MoeModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_router_logits = ( + output_router_logits if output_router_logits is not None else self.config.output_router_logits + ) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if (input_ids is None) ^ (inputs_embeds is not None): + raise ValueError( + "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one" + ) + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + use_legacy_cache = False + if use_cache and not isinstance(past_key_values, Cache): + use_legacy_cache = True + past_key_values = DynamicCache.from_legacy_cache(past_key_values) + logger.warning_once( + "We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. 
" + "Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)" + ) + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + if cache_position is None: + past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 + cache_position = torch.arange( + past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device + ) + if position_ids is None: + position_ids = cache_position.unsqueeze(0) + + causal_mask = self._update_causal_mask( + attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions + ) + + hidden_states = inputs_embeds + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_router_logits = () if output_router_logits else None + next_decoder_cache = None + + for decoder_layer in self.layers: + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + causal_mask, + position_ids, + past_key_values, + output_attentions, + output_router_logits, + use_cache, + cache_position, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=causal_mask, + position_ids=position_ids, + past_key_value=past_key_values, + output_attentions=output_attentions, + output_router_logits=output_router_logits, + use_cache=use_cache, + cache_position=cache_position, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache = layer_outputs[2 if output_attentions else 1] + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + if output_router_logits and layer_outputs[-1] is not None: + all_router_logits += (layer_outputs[-1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = None + if use_cache: + next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache + + if not return_dict: + return tuple( + v + for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits] + if v is not None + ) + return MoeModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + router_logits=all_router_logits, + ) + + # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask + def _update_causal_mask( + self, + attention_mask: torch.Tensor, + input_tensor: torch.Tensor, + cache_position: torch.Tensor, + past_key_values: Cache, + output_attentions: bool, + ): + # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static + # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes. + # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using + # `fullgraph=True`. 
See more context in https://github.com/huggingface/transformers/pull/29114 + + if self.config._attn_implementation == "flash_attention_2": + if attention_mask is not None and 0.0 in attention_mask: + return attention_mask + return None + + # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in + # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail + # to infer the attention mask. + past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 + using_static_cache = isinstance(past_key_values, StaticCache) + + # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward + if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions: + if AttentionMaskConverter._ignore_causal_mask_sdpa( + attention_mask, + inputs_embeds=input_tensor, + past_key_values_length=past_seen_tokens, + is_training=self.training, + ): + return None + + dtype, device = input_tensor.dtype, input_tensor.device + min_dtype = torch.finfo(dtype).min + sequence_length = input_tensor.shape[1] + if using_static_cache: + target_length = past_key_values.get_max_length() + else: + target_length = ( + attention_mask.shape[-1] + if isinstance(attention_mask, torch.Tensor) + else past_seen_tokens + sequence_length + 1 + ) + + if attention_mask is not None and attention_mask.dim() == 4: + # in this case we assume that the mask comes already in inverted form and requires no inversion or slicing + if attention_mask.max() != 0: + raise ValueError("Custom 4D attention mask should be passed in inverted form with max==0`") + causal_mask = attention_mask + else: + causal_mask = torch.full( + (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device + ) + if sequence_length != 1: + causal_mask = torch.triu(causal_mask, diagonal=1) + causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) + causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1) + if attention_mask is not None: + causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit + mask_length = attention_mask.shape[-1] + padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] + padding_mask = padding_mask == 0 + causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( + padding_mask, min_dtype + ) + if ( + self.config._attn_implementation == "sdpa" + and attention_mask is not None + and attention_mask.device.type == "cuda" + and not output_attentions + ): + # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when + # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. 
+ # Details: https://github.com/pytorch/pytorch/issues/110213 + causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) + + return causal_mask + + +class Qwen2MoeForCausalLM(Qwen2MoePreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.model = Qwen2MoeModel(config) + self.vocab_size = config.vocab_size + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + self.router_aux_loss_coef = config.router_aux_loss_coef + self.num_experts = config.num_experts + self.num_experts_per_tok = config.num_experts_per_tok + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + return self.model + + @add_start_docstrings_to_model_forward(QWEN2MOE_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_router_logits: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + ) -> Union[Tuple, MoeCausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, Qwen2MoeForCausalLM + + >>> model = Qwen2MoeForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) + >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) + + >>> prompt = "Hey, are you conscious? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." 
+ ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_router_logits = ( + output_router_logits if output_router_logits is not None else self.config.output_router_logits + ) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + output_router_logits=output_router_logits, + return_dict=return_dict, + cache_position=cache_position, + ) + + hidden_states = outputs[0] + logits = self.lm_head(hidden_states) + logits = logits.float() + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + aux_loss = None + if output_router_logits: + aux_loss = load_balancing_loss_func( + outputs.router_logits if return_dict else outputs[-1], + self.num_experts, + self.num_experts_per_tok, + attention_mask, + ) + if labels is not None: + loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device + + if not return_dict: + output = (logits,) + outputs[1:] + if output_router_logits: + output = (aux_loss,) + output + return (loss,) + output if loss is not None else output + + return MoeCausalLMOutputWithPast( + loss=loss, + aux_loss=aux_loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + router_logits=outputs.router_logits, + ) + + def prepare_inputs_for_generation( + self, + input_ids, + past_key_values=None, + attention_mask=None, + inputs_embeds=None, + cache_position=None, + use_cache=True, + **kwargs, + ): + past_length = 0 + # Omit tokens covered by past_key_values + if past_key_values is not None: + if isinstance(past_key_values, Cache): + past_length = cache_position[0] if cache_position is not None else past_key_values.get_seq_length() + max_cache_length = ( + torch.tensor(past_key_values.get_max_length(), device=input_ids.device) + if past_key_values.get_max_length() is not None + else None + ) + cache_length = past_length if max_cache_length is None else torch.min(max_cache_length, past_length) + # TODO joao: remove this `else` after `generate` prioritizes `Cache` objects + else: + cache_length = past_length = past_key_values[0][0].shape[2] + max_cache_length = None + + # Keep only the unprocessed tokens: + # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where + # some of the inputs are exclusively passed as part of the cache (e.g. 
when passing input_embeds as + # input) + if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]: + input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :] + # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard + # input_ids based on the past_length. + elif past_length < input_ids.shape[1]: + input_ids = input_ids[:, past_length:] + # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens. + + # If we are about to go beyond the maximum cache length, we need to crop the input attention mask. + if ( + max_cache_length is not None + and attention_mask is not None + and cache_length + input_ids.shape[1] > max_cache_length + ): + attention_mask = attention_mask[:, -max_cache_length:] + + position_ids = kwargs.get("position_ids", None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_length == 0: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1] + if cache_position is None: + cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device) + elif use_cache: + cache_position = cache_position[-input_length:] + + model_inputs.update( + { + "position_ids": position_ids, + "past_key_values": past_key_values, + "use_cache": use_cache, + "attention_mask": attention_mask, + "cache_position": cache_position, + } + ) + return model_inputs + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past + + +@add_start_docstrings( + """ + The Qwen2MoE Model transformer with a sequence classification head on top (linear layer). + + [`Qwen2MoeForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT-2) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). 
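+
+    For example, the last non-padding token can be located as in this minimal sketch (illustrative
+    values; `0` is assumed to be the `pad_token_id`):
+
+    ```python
+    >>> import torch
+    >>> input_ids = torch.tensor([[11, 12, 0, 0], [21, 22, 23, 24]])
+    >>> lengths = torch.eq(input_ids, 0).int().argmax(-1) - 1
+    >>> lengths % input_ids.shape[-1]  # index of the last non-padding token in each row
+    tensor([1, 3])
+    ```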
+ """, + QWEN2MOE_START_DOCSTRING, +) +# Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Qwen2Moe, LLAMA->QWEN2MOE +class Qwen2MoeForSequenceClassification(Qwen2MoePreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = Qwen2MoeModel(config) + self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(QWEN2MOE_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility + sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 + sequence_lengths = sequence_lengths % input_ids.shape[-1] + sequence_lengths = sequence_lengths.to(logits.device) + else: + sequence_lengths = -1 + + pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] + + loss = None + if labels is not None: + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == 
"single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels) + if not return_dict: + output = (pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + +@add_start_docstrings( + """ + The Qwen2MoE Model transformer with a token classification head on top (a linear layer on top of the hidden-states + output) e.g. for Named-Entity-Recognition (NER) tasks. + """, + QWEN2MOE_START_DOCSTRING, +) +# Copied from transformers.models.llama.modeling_llama.LlamaForTokenClassification with Llama->Qwen2Moe, LLAMA->QWEN2MOE +class Qwen2MoeForTokenClassification(Qwen2MoePreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = Qwen2MoeModel(config) + if getattr(config, "classifier_dropout", None) is not None: + classifier_dropout = config.classifier_dropout + elif getattr(config, "hidden_dropout", None) is not None: + classifier_dropout = config.hidden_dropout + else: + classifier_dropout = 0.1 + self.dropout = nn.Dropout(classifier_dropout) + self.score = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(QWEN2MOE_INPUTS_DOCSTRING) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, TokenClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = outputs[0] + sequence_output = self.dropout(sequence_output) + logits = self.score(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/ktransformers/operators/RoPE.py b/ktransformers/operators/RoPE.py new file mode 100644 index 0000000..5fcce4f --- /dev/null +++ b/ktransformers/operators/RoPE.py @@ -0,0 +1,65 @@ +''' +Description : +Author : Boxin Zhang +Version : 0.1.0 +Copyright (c) 2024 by KVCache.AI, All Rights Reserved. +''' +from torch import nn +from ktransformers.models.modeling_deepseek import DeepseekV2YarnRotaryEmbedding, DeepseekV2RotaryEmbedding +from ktransformers.operators.base_operator import BaseInjectedModule +from ktransformers.util.custom_gguf import GGUFLoader +from ktransformers.util.utils import InferenceState +from transformers.configuration_utils import PretrainedConfig +# Copied from transformers.models.mixtral.modeling_mixtral.MixtralRotaryEmbedding with Mixtral->Qwen2Moe +class RotaryEmbedding(BaseInjectedModule, DeepseekV2RotaryEmbedding): + def __init__(self, + key: str, + gguf_loader : GGUFLoader, + config: PretrainedConfig, + orig_module: nn.Module, + device: str = "cuda", + **kwargs): + BaseInjectedModule.__init__(self, key, gguf_loader, config, orig_module, device, **kwargs) + self.orig_module.__init__(orig_module.dim, + orig_module.max_position_embeddings, + orig_module.base) + + def load(self): + self.orig_module.__init__(self.orig_module.dim, + self.orig_module.max_position_embeddings, + self.orig_module.base, + self.device) + +class YarnRotaryEmbedding(BaseInjectedModule, DeepseekV2YarnRotaryEmbedding): + def __init__(self, + key: str, + gguf_loader : GGUFLoader, + config: PretrainedConfig, + orig_module: nn.Module, + device: str = "cuda", + **kwargs): + BaseInjectedModule.__init__(self, key, gguf_loader, config, orig_module, device, **kwargs) + self.orig_module.__init__(orig_module.dim, + orig_module.max_position_embeddings, + orig_module.base, + None, #device + orig_module.scaling_factor, + orig_module.original_max_position_embeddings, + orig_module.beta_fast, + orig_module.beta_slow, + orig_module.mscale, + orig_module.mscale_all_dim) + + + def load(self): + self.orig_module.__init__(self.orig_module.dim, + self.orig_module.max_position_embeddings, + self.orig_module.base, + self.device, + self.orig_module.scaling_factor, + self.orig_module.original_max_position_embeddings, + self.orig_module.beta_fast, + self.orig_module.beta_slow, + self.orig_module.mscale, + self.orig_module.mscale_all_dim) + diff --git a/ktransformers/operators/__init__.py b/ktransformers/operators/__init__.py new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/ktransformers/operators/__init__.py @@ -0,0 +1 @@ + diff --git a/ktransformers/operators/attention.py 
b/ktransformers/operators/attention.py new file mode 100644 index 0000000..0648f51 --- /dev/null +++ b/ktransformers/operators/attention.py @@ -0,0 +1,199 @@ +''' +Description : +Author : Boxin Zhang +Version : 0.1.0 +Copyright (c) 2024 by KVCache.AI, All Rights Reserved. +''' +import torch +from torch import nn +import warnings +from ktransformers.models.configuration_deepseek import DeepseekV2Config +from ktransformers.models.modeling_deepseek import DeepseekV2Attention, apply_rotary_pos_emb +from typing import Optional, Tuple +from ktransformers.operators.base_operator import BaseInjectedModule +from ktransformers.util.custom_gguf import GGUFLoader +from transformers.configuration_utils import PretrainedConfig +from transformers.cache_utils import Cache + +class DeepseekV2AttentionInjected(BaseInjectedModule, DeepseekV2Attention): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, + key: str, + gguf_loader : GGUFLoader, + config: PretrainedConfig, + orig_module: nn.Module, + device: str = "cuda", + **kwargs): + BaseInjectedModule.__init__(self, key, gguf_loader, config, orig_module, device, **kwargs) + self.orig_module.__init__(orig_module.config, + orig_module.layer_idx) + + def get_absorbed(self) -> Tuple[torch.Tensor, torch.Tensor]: + if not (hasattr(self, 'q_absorb') and hasattr(self, 'out_absorb')): + kv_b_proj = self.kv_b_proj.weight.view(self.num_heads, -1, self.kv_lora_rank) + q_absorb = kv_b_proj[:, :self.qk_nope_head_dim, :].reshape(-1, self.kv_lora_rank) + out_absorb = kv_b_proj[:, self.qk_nope_head_dim:, :].reshape(-1, self.kv_lora_rank) + self.q_absorb = nn.Linear(self.kv_lora_rank, self.num_heads * self.qk_nope_head_dim, + bias=False, dtype=q_absorb.dtype, device=q_absorb.device) + self.q_absorb.weight.data = q_absorb + self.out_absorb = nn.Linear(self.kv_lora_rank, self.num_heads * self.v_head_dim, + bias=False, dtype=out_absorb.dtype, device=out_absorb.device) + self.out_absorb.weight.data = out_absorb + del self.orig_module.kv_b_proj + q_absorb = self.q_absorb.weight.view(self.num_heads, self.qk_nope_head_dim, self.kv_lora_rank) + out_absorb = self.out_absorb.weight.view(self.num_heads, self.v_head_dim, self.kv_lora_rank) + return q_absorb, out_absorb + + def forward_chunck( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + **kwargs + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + if self.q_lora_rank is None: + q = self.q_proj(hidden_states) + else: + q = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states))) + q = q.view(bsz, q_len, self.num_heads, self.q_head_dim).transpose(1, 2) + q_nope, q_pe = torch.split( + q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1 + ) + + compressed_kv = self.kv_a_proj_with_mqa(hidden_states) + compressed_kv, k_pe = torch.split( + compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1 + ) + compressed_kv = self.kv_a_layernorm(compressed_kv) + k_pe = k_pe.view(bsz, q_len, 1, self.qk_rope_head_dim).transpose(1, 2) + + kv_seq_len = k_pe.shape[-2] + if past_key_value is not None: + if self.layer_idx is None: + raise ValueError( + f"The cache structure has changed since version v4.36. 
If you are using {self.__class__.__name__} " + "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " + "with a layer index." + ) + kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + + cos, sin = self.rotary_emb(q_pe, position_ids) + q_pe, k_pe = apply_rotary_pos_emb(q_pe, k_pe, cos, sin) + + if past_key_value is not None: + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models + compressed_kv = compressed_kv.unsqueeze(1) + k_pe, compressed_kv = past_key_value.update(k_pe, compressed_kv, self.layer_idx, cache_kwargs) + compressed_kv = compressed_kv.squeeze(1) + #if cache_position is not None: + # compressed_kv = compressed_kv[:,: cache_position[-1] + 1,:] + # k_pe = k_pe[:,:,: cache_position[-1] + 1,:] + q_absorb, out_absorb = self.get_absorbed() + + q_nope = torch.matmul(q_nope, q_absorb) + attn_weights = (torch.matmul(q_pe, k_pe.mT) + torch.matmul(q_nope, compressed_kv.unsqueeze(-3).mT)) * self.softmax_scale + """ + if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" + f" {attn_weights.size()}" + ) + assert attention_mask is not None + """ + if attention_mask is not None: + """ + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" + ) + """ + #causal_mask = attention_mask[:, :, :, : kv_seq_len] + attn_weights = attn_weights + attention_mask + + # upcast attention to fp32 + attn_weights = nn.functional.softmax( + attn_weights, dim=-1, dtype=torch.float32 + ).to(q_pe.dtype) + attn_weights = nn.functional.dropout( + attn_weights, p=self.attention_dropout, training=self.training + ) + attn_output = torch.einsum('bhql,blc->bhqc', attn_weights, compressed_kv) + + attn_output = torch.matmul(attn_output, out_absorb.mT) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.v_head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.v_head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + + attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.v_head_dim) + + attn_output = self.o_proj(attn_output) + + return attn_output, None, past_key_value + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" + ) + bsz, q_len, _ = hidden_states.size() + chunck_size = 256 # TODO, generate chunck_size automatically. 
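+        # When q_len exceeds chunck_size, the loop below walks the query sequence in fixed-size
+        # chunks against the shared KV cache, which bounds the peak size of the attention-weight
+        # matrix during long prefills. A small sketch of the chunk bounds it produces (illustrative
+        # values, not part of this module):
+        #   q_len, chunck_size = 1000, 256
+        #   [(i, min(i + chunck_size, q_len)) for i in range(0, q_len, chunck_size)]
+        #   # -> [(0, 256), (256, 512), (512, 768), (768, 1000)]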
+
+        if q_len <= chunck_size:
+            return self.forward_chunck(
+                hidden_states,
+                attention_mask,
+                position_ids,
+                past_key_value,
+                output_attentions,
+                use_cache,
+                cache_position,
+                **kwargs
+            )
+
+        assert not output_attentions, "output_attentions is not supported when using chunked attention"
+        attn_output = None
+        cur_idx = 0
+        while cur_idx < q_len:
+            if attention_mask is not None:
+                chunk_mask = attention_mask[:, :, cur_idx:min(cur_idx + chunck_size, q_len), ...]
+            else:
+                chunk_mask = None
+
+            cur_output, _, _ = self.forward_chunck(
+                hidden_states[:, cur_idx:min(cur_idx + chunck_size, q_len), ...],
+                chunk_mask,
+                position_ids[:, cur_idx:min(cur_idx + chunck_size, q_len)],
+                past_key_value,
+                output_attentions,
+                use_cache,
+                cache_position[cur_idx:min(cur_idx + chunck_size, q_len)],
+                **kwargs
+            )
+            cur_idx += chunck_size
+            if attn_output is None:
+                attn_output = cur_output
+            else:
+                attn_output = torch.cat((attn_output, cur_output), dim=-2)
+
+        return attn_output, None, past_key_value
diff --git a/ktransformers/operators/base_operator.py b/ktransformers/operators/base_operator.py
new file mode 100644
index 0000000..1cf1471
--- /dev/null
+++ b/ktransformers/operators/base_operator.py
@@ -0,0 +1,60 @@
+'''
+Description :
+Author : Boxin Zhang
+Version : 0.1.0
+Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
+'''
+from typing import Any
+from torch import nn, Tensor
+from ktransformers.util.custom_gguf import GGUFLoader
+from transformers.configuration_utils import PretrainedConfig
+import ktransformers.util.utils as utils
+class BaseInjectedModule(nn.Module):
+
+    def __init__(self,
+                 key: str,
+                 gguf_loader : GGUFLoader,
+                 config: PretrainedConfig,
+                 orig_module: nn.Module,
+                 device: str = "cuda",
+                 **kwargs):
+        nn.Module.__init__(self)
+        nn.Module.__setattr__(self, "orig_module", orig_module)
+        object.__setattr__(self, "key", key)
+        object.__setattr__(self, "gguf_loader", gguf_loader)
+        object.__setattr__(self, "config", config)
+        object.__setattr__(self, "device", device)
+
+    def __getattr__(self, name: str) -> Any:
+        # __getattr__ in nn.Module doesn't call super().__getattribute__ when name is not in nn.Module.__dict__,
+        # but __setattr__ in nn.Module does call super().__setattr__ in that case. As a result, an attribute can
+        # be set on the module yet be unreachable through __getattr__; typically these are built-in attributes
+        # of the class, so class.attr does not go through __getattr__.
+        # Example:
+        # ...import torch
+        # ...l=torch.nn.Linear(100,200)
+        # ...l.out_features # 200
+        # ...l.__getattr__("out_features") # AttributeError: 'Linear' object has no attribute 'out_features'
+        try:
+            return object.__getattribute__(self, name) # if this attr belongs to BaseInjectedModule
+        except:
+            if name == "orig_module":
+                return nn.Module.__getattr__(self, "orig_module")
+            try:
+                return nn.Module.__getattr__(self, "orig_module").__getattr__(name) # if this attr belongs to orig_module
+            except:
+                return super(nn.Module, nn.Module.__getattr__(self, "orig_module")).__getattribute__(name) # if this attr belongs to orig_module but not in nn.Module.__dict__
+
+    def __setattr__(self, name: str, value: Tensor | nn.Module) -> None:
+        if name == "orig_module":
+            return nn.Module.__setattr__(self, "orig_module", value)
+        elif hasattr(self, name):
+            return object.__setattr__(self, name, value)
+        return nn.Module.__getattr__(self, "orig_module").__setattr__(name, value)
+
+    def forward(self, *args, **kwargs):
+        return self.orig_module.forward(*args, **kwargs)
+
+    def load(self):
+        for name, child in self._modules.items():
+            utils.load_weights(child, self.gguf_loader, self.key+".")
diff --git a/ktransformers/operators/experts.py b/ktransformers/operators/experts.py
new file mode 100644
index 0000000..6adb657
--- /dev/null
+++ b/ktransformers/operators/experts.py
@@ -0,0 +1,679 @@
+#!/usr/bin/env python
+# coding=utf-8
+'''
+Description :
+Author : Azure-Tang, Boxin Zhang, chenht2022
+Date : 2024-07-25 11:25:24
+Version : 0.1.0
+LastEditors : Azure
+LastEditTime : 2024-07-26 09:27:41
+Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
+'''
+
+from typing import Any, Union
+import numpy as np
+import numpy.typing as npt
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch
+import sys, os
+from ktransformers.operators.base_operator import BaseInjectedModule
+
+sys.path.append(os.path.dirname(__file__) + "/../ktransformers_ext/build")
+import cpuinfer_ext
+from cpuinfer_ext.moe import MOEConfig, MOE
+import ctypes
+from ktransformers.util.custom_gguf import GGUFLoader
+from ktransformers.util.utils import InferenceState
+from ktransformers.server.config.config import Config
+from transformers.activations import ACT2FN
+from transformers.configuration_utils import PretrainedConfig
+from abc import ABC, abstractmethod
+from ktransformers.operators.linear import QuantizedLinearMarlin, QuantizedLinearTorch, KTransformerLinear
+import time
+
+
+# class Base(BaseInjectedModule, ABC):
+class MLPExpertsBase(ABC):
+    def __init__(self, key: str, gguf_loader: GGUFLoader, config: PretrainedConfig, orig_module: nn.Module, device: str = "cuda", **kwargs):
+        # super().__init__(key, gguf_loader, config, orig_module, device, **kwargs)
+        self.key = key
+        self.gguf_loader = gguf_loader
+        self.config = config
+        self.device = device
+
+    @abstractmethod
+    def forward(self, input_tensor, expert_ids, weights):
+        pass
+
+    @abstractmethod
+    def load(self, w: dict | nn.Parameter | tuple | None = None, device: str = "cpu", warmup: bool = False):
+        pass
+
+    @abstractmethod
+    def unload(self):
+        pass
+
+    def load_weights(self, override_key: str | None = None, device: str = "cpu"):
+        res = {}
+        if override_key is not None:
+            keys = override_key
+        else:
+            keys = [self.key]
+
+        gate = None
+        up = None
+        down = None
+        gate_type = None
+        up_type = None
+        down_type = None
+
+        for key in keys:
+            if key + ".ffn_gate_exps.weight" in self.gguf_loader.tensor_info:
+                targets = [".ffn_gate_exps.weight",
".ffn_up_exps.weight", ".ffn_down_exps.weight" ] + tensors = self.load_multi(key, targets, device=device) + gate = tensors[".ffn_gate_exps.weight"] + up = tensors[".ffn_up_exps.weight"] + down = tensors[".ffn_down_exps.weight"] + gate_type = self.gguf_loader.tensor_info[key + ".ffn_gate_exps.weight"]["ggml_type"] + up_type = self.gguf_loader.tensor_info[key + ".ffn_up_exps.weight"]["ggml_type"] + down_type = self.gguf_loader.tensor_info[key + ".ffn_down_exps.weight"]["ggml_type"] + else: + raise ValueError(f"Experts {key} not found in gguf_loader") + res = {key:{"gate": gate, "up": up, "down": down, "gate_type": gate_type, "up_type": up_type, "down_type": down_type}} + return res + + def load_multi(self, key: str, keys: list[str], device: str = "cpu"): + tensors = {} + for k in keys: + tensors[k] = self.gguf_loader.load_gguf_tensor(key + k, device=device) + return tensors + +class MLPCPUExperts(MLPExpertsBase): + input_tensor_cpu:Tensor = None + expert_ids_cpu:Tensor = None + weights_cpu:Tensor = None + output_cpu:Tensor = None + output_gpu:Tensor = None + CPU_INFER = cpuinfer_ext.CPUInfer(Config().cpu_infer) + def __init__( + self, + key: str, + gguf_loader: GGUFLoader, + config: PretrainedConfig, + n_routed_experts: int, + orig_module: nn.Module = None, + device: str = "cpu", + out_device: str = "cuda", # this device mean which device the output should on + **kwargs + ): + super().__init__(key, gguf_loader, config, orig_module, device, **kwargs) + assert device.lower() == "cpu", "MLPCPUExperts can only be loaded on CPU" + self.n_routed_experts = n_routed_experts + self.out_device = out_device + + def load(self, w: dict | nn.Parameter | tuple | None = None, device:str|None = None, warmup:bool = False): + if device: + assert device.lower() == "cpu", "MLPCPUExperts can only be loaded on CPU, Parameter \"device\" can be cpu or None." 
+ if w is None: w = self.load_weights()[self.key] + self.gate = w["gate"] + self.up = w["up"] + self.down = w["down"] + self.gate_type = w["gate_type"] + self.up_type = w["up_type"] + self.down_type = w["down_type"] + gate_ptr = ctypes.addressof( + ctypes.cast(self.gate.ctypes.data, ctypes.POINTER(ctypes.c_uint64)).contents + ) + up_ptr = ctypes.addressof( + ctypes.cast(self.up.ctypes.data, ctypes.POINTER(ctypes.c_uint64)).contents + ) + down_ptr = ctypes.addressof( + ctypes.cast(self.down.ctypes.data, ctypes.POINTER(ctypes.c_uint64)).contents + ) + # print(self.gate_qtype, self.up_qtype, self.down_qtype) + n_routed_experts = self.n_routed_experts + # n_routed_experts = len(self.orig_module) + moe_config = MOEConfig( + n_routed_experts, + self.config.num_experts_per_tok, + self.config.hidden_size, + self.config.moe_intermediate_size, + 64, + 10, + 1024, + gate_ptr, + up_ptr, + down_ptr, + self.gate_type, + self.up_type, + self.down_type, + 30, # TODO: get from model.dtype + ) + # print(n_routed_experts, hidden_size, moe_intermediate_size) + num_experts_per_tok = self.config.num_experts_per_tok + self.moe = MOE(moe_config) + self.cpu_infer = MLPCPUExperts.CPU_INFER + if warmup: + self.cpu_infer.submit(self.moe.warm_up) + self.cpu_infer.sync() + if MLPCPUExperts.output_gpu == None: + MLPCPUExperts.input_tensor_cpu = torch.empty((self.config.hidden_size), device="cpu", pin_memory=True) + MLPCPUExperts.expert_ids_cpu = torch.empty((num_experts_per_tok), device="cpu", dtype=torch.long, pin_memory=True) + MLPCPUExperts.weights_cpu = torch.empty((num_experts_per_tok), device="cpu", dtype=torch.float32, pin_memory=True) + MLPCPUExperts.output_cpu = torch.empty((self.config.hidden_size), device="cpu", pin_memory=True) + MLPCPUExperts.output_gpu = torch.empty((self.config.hidden_size), device=self.out_device) + + def submit_for_one_decode(self, input_tensor, expert_ids, weights): + MLPCPUExperts.input_tensor_cpu.copy_(input_tensor, non_blocking=True) + MLPCPUExperts.expert_ids_cpu.copy_(expert_ids, non_blocking=True) + MLPCPUExperts.weights_cpu.copy_(weights, non_blocking=True) + self.cpu_infer.submit_with_cuda_stream(torch.cuda.current_stream().cuda_stream, self.moe.forward, 1, expert_ids.size(0), MLPCPUExperts.expert_ids_cpu.data_ptr(), MLPCPUExperts.weights_cpu.data_ptr(), MLPCPUExperts.input_tensor_cpu.data_ptr(), MLPCPUExperts.output_cpu.data_ptr()) + + def sync_for_one_decode(self): + self.cpu_infer.sync_with_cuda_stream(torch.cuda.current_stream().cuda_stream) + MLPCPUExperts.output_gpu.copy_(MLPCPUExperts.output_cpu, non_blocking=True) + #print("capturing experts finish") + return MLPCPUExperts.output_gpu + + def forward(self, input_tensor, expert_ids, weights): + # generate, capture and run cuda graph + if input_tensor.size(0)==1: + #print("capturing experts") + MLPCPUExperts.input_tensor_cpu.copy_(input_tensor, non_blocking=True) + MLPCPUExperts.expert_ids_cpu.copy_(expert_ids, non_blocking=True) + MLPCPUExperts.weights_cpu.copy_(weights, non_blocking=True) + self.cpu_infer.submit_with_cuda_stream(torch.cuda.current_stream().cuda_stream, self.moe.forward, 1, expert_ids.size(1), MLPCPUExperts.expert_ids_cpu.data_ptr(), MLPCPUExperts.weights_cpu.data_ptr(), MLPCPUExperts.input_tensor_cpu.data_ptr(), MLPCPUExperts.output_cpu.data_ptr()) + self.cpu_infer.sync_with_cuda_stream(torch.cuda.current_stream().cuda_stream) + MLPCPUExperts.output_gpu.copy_(MLPCPUExperts.output_cpu, non_blocking=True) + #print("capturing experts finish") + return MLPCPUExperts.output_gpu + else: + input_tensor = 
input_tensor.contiguous().cpu() + expert_ids = expert_ids.contiguous().cpu() + weights = weights.contiguous().to(torch.float32).cpu() + output = torch.empty_like(input_tensor).contiguous() + self.cpu_infer.submit(self.moe.forward, expert_ids.size(0), expert_ids.size(1), expert_ids.data_ptr(), weights.data_ptr(), input_tensor.data_ptr(), output.data_ptr()) + self.cpu_infer.sync() + return output.to(device=object.__getattribute__(self, "device")) + + def unload(self): + return + + def load_weights(self, override_key: str | None = None, device: str = "cpu"): + res = {} + if override_key is not None: + keys = override_key + else: + keys = [self.key] + + gate = None + up = None + down = None + gate_type = None + up_type = None + down_type = None + + for key in keys: + if key + ".ffn_gate_exps.weight" in self.gguf_loader.tensor_info: + gate = self.gguf_loader.get_mmap_tensor(key + ".ffn_gate_exps.weight") + up = self.gguf_loader.get_mmap_tensor(key + ".ffn_up_exps.weight") + down = self.gguf_loader.get_mmap_tensor(key + ".ffn_down_exps.weight") + gate_type = self.gguf_loader.tensor_info[key + ".ffn_gate_exps.weight"]["ggml_type"] + up_type = self.gguf_loader.tensor_info[key + ".ffn_up_exps.weight"]["ggml_type"] + down_type = self.gguf_loader.tensor_info[key + ".ffn_down_exps.weight"]["ggml_type"] + else: + raise ValueError(f"Experts {key} not found in gguf_loader") + res = {key:{"gate": gate, "up": up, "down": down, "gate_type": gate_type, "up_type": up_type, "down_type": down_type}} + return res + +class MLPExpertsMarlin(MLPExpertsBase): + expert_num: int + loaded_experts_idx: list[int] + def __init__( + self, + key: str, + gguf_loader: GGUFLoader, + config: PretrainedConfig, + n_routed_experts: int, + orig_module: nn.Module = None, + device: str = "cuda", + **kwargs + ): + super().__init__(key, gguf_loader, config, orig_module, device, **kwargs) + self.expert_num = n_routed_experts + self.loaded_experts_idx = [] + self.act_fn = ACT2FN[config.hidden_act] + assert device.lower() != "cpu", "Marlin experts can only be loaded on GPU" + self.device = device + # create empty marlin experts according to the number of experts per token + # up + self.up_projs = [QuantizedLinearMarlin(key+ "." + "ffn_up_exps", gguf_loader, config, device=device) for i in range(self.expert_num)] + # gate + self.gate_projs = [QuantizedLinearMarlin(key+ "." + "ffn_gate_exps", gguf_loader, config, device=device) for i in range(self.expert_num)] + # down + self.down_projs = [QuantizedLinearMarlin(key+ "." 
+ "ffn_down_exps", gguf_loader, config, device=device) for i in range(self.expert_num)] + + def load(self, w: dict | nn.Parameter | tuple | None = None, device: str | None = None, warmup: bool = False): + if device is None: device = self.device + assert device.lower() != "cpu", "Marlin experts can only be loaded on GPU" + if w is None: w = self.load_weights()[self.key] + + if isinstance(w, dict): + self.gate = nn.Parameter(torch.from_numpy(w["gate"])) + self.up = nn.Parameter(torch.from_numpy(w["up"])) + self.down = nn.Parameter(torch.from_numpy(w["down"])) + for i in range(self.expert_num): + self.up_projs[i].load(self.up[i,...], device=device) + self.gate_projs[i].load(self.gate[i,...], device=device) + self.down_projs[i].load(self.down[i,...], device=device) + self.loaded_experts_idx.append(i) + return + + def unload(self): + for i in self.loaded_experts_idx: + self.up_projs[i].unload() + self.gate_projs[i].unload() + self.down_projs[i].unload() + self.loaded_experts_idx = [] + + def load_weights(self, override_key: str | None = None): + res = {} + if override_key is not None: + keys = override_key + else: + keys = [self.key] + + gate = None + up = None + down = None + gate_type = None + up_type = None + down_type = None + + for key in keys: + if key + ".ffn_gate_exps.weight" in self.gguf_loader.tensor_info: + gate = self.gguf_loader.load_gguf_tensor(key + ".ffn_gate_exps.weight") + up = self.gguf_loader.load_gguf_tensor(key + ".ffn_up_exps.weight") + down = self.gguf_loader.load_gguf_tensor(key + ".ffn_down_exps.weight") + gate_type = self.gguf_loader.tensor_info[key + ".ffn_gate_exps.weight"]["ggml_type"] + up_type = self.gguf_loader.tensor_info[key + ".ffn_up_exps.weight"]["ggml_type"] + down_type = self.gguf_loader.tensor_info[key + ".ffn_down_exps.weight"]["ggml_type"] + # tensors = self.load_multi(key, [".ffn_gate_exps.weight", ".ffn_up_exps.weight", ".ffn_down_exps.weight"]) + res = {key:{"gate": gate, "up": up, "down": down, "gate_type": gate_type, "up_type": up_type, "down_type": down_type}} + return res + + def forward(self, input_tensor:torch.Tensor, expert_ids, weights): + # forward + device = input_tensor.device + input_tensor = input_tensor.to("cuda") + outs = torch.zeros_like(input_tensor) + for expert_idx in range(expert_ids.size(0)): + down_proj = self.down_projs[expert_idx] + gate_proj = self.gate_projs[expert_idx] + up_proj = self.up_projs[expert_idx] + + outs += down_proj(self.act_fn(gate_proj(input_tensor)) * up_proj(input_tensor)) * weights[expert_idx] + outs = outs.to(device) + return outs + +class MLPExpertsTorch(MLPExpertsBase): + expert_num: int + loaded_experts_idx: list[int] + gate: torch.Tensor + up: torch.Tensor + down: torch.Tensor + def __init__( + self, + key: str, + gguf_loader: GGUFLoader, + config: PretrainedConfig, + n_routed_experts: int, + orig_module: nn.Module = None, + device: str = "cpu", + **kwargs + ): + super().__init__(key, gguf_loader, config, orig_module, device, **kwargs) + self.expert_num = n_routed_experts + # self.loaded_experts_idx = [] + self.act_fn = ACT2FN[config.hidden_act] + self.device = device + self.gate = None + self.up = None + self.donw = None + self.dtype = torch.get_default_dtype() + + def load(self, w: dict | nn.Parameter | tuple | None = None, device: str | None = None, warmup: bool = False): + if device is None: device = self.device + if w is None: w = self.load_weights(device=device)[self.key] + + if isinstance(w, dict): + self.gate = w["gate"].to(device=device, dtype=self.dtype) + self.up = w["up"].to(device=device, 
dtype=self.dtype) + self.down = w["down"].to(device=device, dtype=self.dtype) + + def unload(self): + if self.gate is not None: + self.gate = None + self.up = None + self.down = None + + def forward(self, hidden_states_cpu: torch.Tensor, selected_experts_cpu: torch.Tensor, routing_weights_cpu: torch.Tensor) -> torch.Tensor: + + batch_sequence_length, hidden_dim = hidden_states_cpu.size() + + final_hidden_states = torch.zeros( + (batch_sequence_length, hidden_dim), dtype=self.gate.dtype, device=hidden_states_cpu.device + ) + org_dtype = hidden_states_cpu.dtype + hidden_states_cpu = hidden_states_cpu.to(self.gate.dtype) + routing_weights_cpu = routing_weights_cpu.to(self.gate.dtype) + # One hot encode the selected experts to create an expert mask + # this will be used to easily index which expert is going to be sollicitated + expert_mask = torch.nn.functional.one_hot(selected_experts_cpu, num_classes=self.expert_num).permute(2, 1, 0) + + # Loop over all available experts in the model and perform the computation on each expert + for expert_idx in range(self.expert_num): + idx, top_x = torch.where(expert_mask[expert_idx]) + # Index the correct hidden states and compute the expert hidden state for + # the current expert. We need to make sure to multiply the output hidden + # states by `routing_weights` on the corresponding tokens (top-1 and top-2) + current_state = hidden_states_cpu[None, top_x].reshape(-1, hidden_dim) + G = current_state @ self.gate[expert_idx,...].T + A = self.act_fn(G) + U = current_state @ self.up[expert_idx,...].T + H = A * U # Element-wise multiplication + current_hidden_states = H @ self.down[expert_idx,...].T * routing_weights_cpu[top_x, idx, None] + # However `index_add_` only support torch tensors for indexing so we'll use + # the `top_x` tensor here. 
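+            # A tiny worked example of the expert-mask indexing used above (illustrative values,
+            # not part of this module):
+            #   selected = torch.tensor([[0, 2], [1, 2]])   # 2 tokens, top-2 experts
+            #   mask = torch.nn.functional.one_hot(selected, num_classes=3).permute(2, 1, 0)
+            #   idx, top_x = torch.where(mask[2])           # who picked expert 2, and in which slot
+            #   # top_x -> tensor([0, 1]) (token indices); idx -> tensor([1, 1]) (top-k slot)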
+ final_hidden_states.index_add_(0, top_x, current_hidden_states) + + return final_hidden_states.to(org_dtype) + +EXPERTS_MAP = { + "MLPCPUExperts": MLPCPUExperts, + "MLPExpertsTorch": MLPExpertsTorch, + "MLPExpertsMarlin": MLPExpertsMarlin, +} +class KTransformersMLPExpert(BaseInjectedModule, MLPExpertsBase): + def __init__(self, + key: str, + gguf_loader: GGUFLoader, + config: PretrainedConfig, + orig_module: nn.Module, + device: str = "cuda", + prefill_device:str = "cuda", + prefill_mlp_type: str | None = "MLPExpertsTorch", + generate_device: str = "cpu", + generate_mlp_type: str | None = "MLPCPUExperts", + **kwargs): + BaseInjectedModule.__init__(self, key, gguf_loader, config, orig_module, device, **kwargs) + MLPExpertsBase.__init__(self, key, gguf_loader, config, orig_module, device, **kwargs) + if generate_mlp_type is not None: + self.generate_experts = EXPERTS_MAP[generate_mlp_type](key, gguf_loader, config, len(orig_module), device=generate_device, **kwargs) + else: + self.generate_experts = None + if prefill_mlp_type is not None: + self.prefill_experts = EXPERTS_MAP[prefill_mlp_type](key, gguf_loader, config, len(orig_module), device=prefill_device, **kwargs) + else: + self.prefill_experts = None + self.gpu_mlp_type = prefill_mlp_type + self.cpu_mlp_type = generate_mlp_type + self.mode = InferenceState.UNLOAD + + def load(self, w: dict = None, mode: InferenceState = None, warmup: bool = True): + # TODO support w as input + if not mode: mode = InferenceState.GENERATE + if mode == InferenceState.GENERATE: + self.prefill_experts.unload() + self.generate_experts.load(w, warmup=warmup) + self.device = self.generate_experts.device + self.mode = mode + elif mode == InferenceState.PREFILL: + self.generate_experts.unload() + self.prefill_experts.load(w, warmup=warmup) + self.device = self.prefill_experts.device + self.mode = mode + elif mode == InferenceState.UNLOAD: + self.unload() + self.mode = mode + self.device = self.generate_experts.device + else: + raise ValueError("mode must be either InferenceState.GENERATE, InferenceState.PREFILL or InferenceState.UNLOAD") + + def unload(self): + if self.generate_experts is not None: + self.generate_experts.unload() + if self.prefill_experts is not None: + self.prefill_experts.unload() + self.device = self.generate_experts.device + + def forward(self, input_tensor, expert_ids, weights): + if self.mode == InferenceState.GENERATE: + assert self.generate_experts is not None, "generate_experts is None" + return self.generate_experts.forward(input_tensor, expert_ids, weights) + elif self.mode == InferenceState.PREFILL: + assert self.prefill_experts is not None, "prefill_experts is None" + return self.prefill_experts.forward(input_tensor, expert_ids, weights) + else: + raise ValueError("load or set_inference_mode before forward") + + def set_inference_mode(self, mode: InferenceState): + if mode == InferenceState.GENERATE: + self.load(mode=InferenceState.GENERATE, warmup=False) + elif mode == InferenceState.PREFILL: + self.load(mode=InferenceState.PREFILL, warmup=False) + elif mode == InferenceState.UNLOAD: + self.unload() + else: + raise ValueError("mode must be either InferenceState.GENERATE, InferenceState.PREFILL or InferenceState.UNLOAD") + + +from ktransformers.models.modeling_deepseek import DeepseekV2MoE +from ktransformers.models.modeling_qwen2_moe import Qwen2MoeSparseMoeBlock + + +class Qwen2MoeSparseMoeBlockInjected(BaseInjectedModule, Qwen2MoeSparseMoeBlock): + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + """ """ + 
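# Fast path for single-token decode: the branch below submits the routed experts to the CPU
+        # backend asynchronously, runs the shared expert on the GPU in the meantime, and sums the two
+        # results once sync_for_one_decode() returns.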
+        orig_shape = hidden_states.shape
+        batch_size, sequence_length, hidden_dim = hidden_states.shape
+        hidden_states = hidden_states.view(-1, hidden_dim)
+        # router_logits: (batch * sequence_length, n_experts)
+        router_logits = self.gate(hidden_states)
+
+        routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
+        routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
+        if self.norm_topk_prob:
+            routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
+        # we cast back to the input dtype
+        routing_weights = routing_weights.to(hidden_states.dtype)
+
+        if sequence_length == 1 and hasattr(self.experts.generate_experts, "submit_for_one_decode"):
+            self.experts.generate_experts.submit_for_one_decode(hidden_states[0], selected_experts[0], routing_weights[0])
+            shared_expert_output = self.shared_expert(hidden_states)
+            shared_expert_output = F.sigmoid(self.shared_expert_gate(hidden_states)) * shared_expert_output
+            y = self.experts.generate_experts.sync_for_one_decode().unsqueeze(0)
+            y += shared_expert_output
+            y.resize_(*orig_shape)
+            return y, router_logits
+
+        hidden_states_expert = hidden_states.to(self.experts.device) if isinstance(self.experts, MLPExpertsBase) else hidden_states.cpu()
+        selected_experts_expert = selected_experts.to(self.experts.device) if isinstance(self.experts, MLPExpertsBase) else selected_experts.cpu()
+        routing_weights_expert = routing_weights.to(self.experts.device) if isinstance(self.experts, MLPExpertsBase) else routing_weights.cpu()
+
+        shared_expert_output = self.shared_expert(hidden_states)
+        shared_expert_output = (
+            F.sigmoid(self.shared_expert_gate(hidden_states)) * shared_expert_output
+        )
+
+        if isinstance(self.experts, MLPExpertsBase):
+            y = (
+                self.moe_on_cpuinfer(
+                    hidden_states_expert, selected_experts_expert, routing_weights_expert
+                )
+                .view(*orig_shape)
+                .to(device=hidden_states.device)
+            )
+        elif hidden_states_expert.size(0) > 10:
+            y = self.moe_infer(
+                hidden_states_expert, selected_experts_expert, routing_weights_expert, orig_shape
+            ).to(device=hidden_states.device)
+        else:
+            y = self.moe_infer_simple(
+                hidden_states_expert, selected_experts_expert, routing_weights_expert
+            ).to(device=hidden_states.device)
+        y += shared_expert_output
+        y.resize_(*orig_shape)
+        return y, router_logits
+
+    @torch.no_grad()
+    def moe_on_cpuinfer(self, x: torch.Tensor, topk_ids: torch.Tensor, topk_weight: torch.Tensor) -> torch.Tensor:
+        outs = torch.empty_like(x)
+        outs = self.experts(x, topk_ids, topk_weight)
+        return outs
+
+    @torch.no_grad()
+    # TODO: may have bugs here
+    def moe_infer_simple(self, hidden_states_cpu: torch.Tensor, selected_experts_cpu: torch.Tensor, routing_weights_cpu: torch.Tensor) -> torch.Tensor:
+        '''
+        hidden_states_cpu: [num_tokens, hidden_size]
+        topk_ids, topk_weight: [num_tokens, num_selected_experts]
+        '''
+        outs = torch.zeros_like(hidden_states_cpu)
+        for token_idx in range(selected_experts_cpu.size(0)):
+            for expert_idx in range(selected_experts_cpu.size(1)):
+                expert = self.experts[selected_experts_cpu[token_idx, expert_idx]]
+                outs[token_idx] += expert.forward(hidden_states_cpu[token_idx]) * routing_weights_cpu[token_idx, expert_idx]
+        return outs
+
+    @torch.no_grad()
+    # TODO: may have bugs here
+    def moe_infer(self, hidden_states_cpu: torch.Tensor, selected_experts_cpu: torch.Tensor, routing_weights_cpu: torch.Tensor, orig_shape: tuple) -> torch.Tensor:
+
+        batch_size, sequence_length, hidden_dim = orig_shape
+
+        final_hidden_states = torch.zeros(
+            (batch_size *
sequence_length, hidden_dim), dtype=hidden_states_cpu.dtype, device=hidden_states_cpu.device + ) + + # One hot encode the selected experts to create an expert mask + # this will be used to easily index which expert is going to be sollicitated + expert_mask = torch.nn.functional.one_hot(selected_experts_cpu, num_classes=self.num_experts).permute(2, 1, 0) + + # Loop over all available experts in the model and perform the computation on each expert + for expert_idx in range(self.num_experts): + expert_layer = self.experts[expert_idx] + idx, top_x = torch.where(expert_mask[expert_idx]) + + # Index the correct hidden states and compute the expert hidden state for + # the current expert. We need to make sure to multiply the output hidden + # states by `routing_weights` on the corresponding tokens (top-1 and top-2) + current_state = hidden_states_cpu[None, top_x].reshape(-1, hidden_dim) + current_hidden_states = expert_layer.forward(current_state) * routing_weights_cpu[top_x, idx, None] + + # However `index_add_` only support torch tensors for indexing so we'll use + # the `top_x` tensor here. + final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states_cpu.dtype)) + + return final_hidden_states + + +class DeepseekV2MoEInjected(BaseInjectedModule, DeepseekV2MoE): + def forward(self, hidden_states): + identity = hidden_states + orig_shape = hidden_states.shape + sequence_length = orig_shape[1] + topk_idx, topk_weight, aux_loss = self.gate(hidden_states) + hidden_states = hidden_states.view(-1, hidden_states.shape[-1]) + + if sequence_length == 1: + self.experts.generate_experts.submit_for_one_decode(hidden_states[0], topk_idx[0], topk_weight[0]) + if self.config.n_shared_experts is not None: + y_ = self.shared_experts(identity).squeeze(0) + y = self.experts.generate_experts.sync_for_one_decode().unsqueeze(0) + y += y_ + y.resize_(*orig_shape) + return y + + if self.config.n_shared_experts is not None: + y_ = self.shared_experts(identity).squeeze(0) + + if isinstance(self.experts, MLPExpertsBase): + y = self.moe_on_cpuinfer(hidden_states, topk_idx, topk_weight).view(*orig_shape).to(device=hidden_states.device) + elif hidden_states.size(0) > 10: + # TODO may bugs here + y = ( + self.moe_infer(hidden_states, topk_idx, topk_weight) + .view(*orig_shape) + .to(device=hidden_states.device) + ) + else: + # TODO may bugs here + y = ( + self.moe_infer_simple(hidden_states, topk_idx, topk_weight) + .view(*orig_shape) + .to(device=hidden_states.device) + ) + if self.config.n_shared_experts is not None: + y += y_ + return y + + @torch.no_grad() + def moe_on_cpuinfer(self, x: torch.Tensor, topk_ids: torch.Tensor, topk_weight: torch.Tensor) -> torch.Tensor: + outs = torch.empty_like(x) + outs = self.experts(x, topk_ids, topk_weight) + return outs + + @torch.no_grad() + # TODO may bugs here + def moe_infer_simple( + self, x: torch.Tensor, topk_ids: torch.Tensor, topk_weight: torch.Tensor + ) -> torch.Tensor: + """ + x: [num_tokens, hidden_size] + topk_ids, topk_weight: [num_tokens, num_selected_experts] + """ + outs = torch.zeros_like(x) + for token_idx in range(topk_ids.size(0)): + for expert_idx in range(topk_ids.size(1)): + expert = self.experts[topk_ids[token_idx, expert_idx]] + outs[token_idx] += ( + expert.forward(x[token_idx]) * topk_weight[token_idx, expert_idx] + ) + return outs + + @torch.no_grad() + # TODO may bugs here + def moe_infer(self, x, topk_ids, topk_weight): + cnts = topk_ids.new_zeros((topk_ids.shape[0], len(self.experts))) + cnts.scatter_(1, topk_ids, 1) + 
tokens_per_expert = cnts.sum(dim=0) + idxs = topk_ids.view(-1).argsort() + sorted_tokens = x[idxs // topk_ids.shape[1]] + tokens_per_expert = tokens_per_expert.cpu().numpy() + + outputs = [] + start_idx = 0 + for i, num_tokens in enumerate(tokens_per_expert): + end_idx = start_idx + num_tokens + if num_tokens == 0: + continue + expert = self.experts[i + self.ep_rank * self.experts_per_rank] + tokens_for_this_expert = sorted_tokens[start_idx:end_idx] + expert_out = expert.forward(tokens_for_this_expert) + outputs.append(expert_out) + start_idx = end_idx + + outs = torch.cat(outputs, dim=0) if len(outputs) else sorted_tokens.new_empty(0) + + new_x = torch.empty_like(outs) + new_x[idxs] = outs + final_out = ( + new_x.view(*topk_ids.shape, -1) + .type(topk_weight.dtype) + .mul_(topk_weight.unsqueeze(dim=-1)) + .sum(dim=1) + .type(new_x.dtype) + ) + return final_out diff --git a/ktransformers/operators/layer_wise_prefill.py b/ktransformers/operators/layer_wise_prefill.py new file mode 100644 index 0000000..61efed8 --- /dev/null +++ b/ktransformers/operators/layer_wise_prefill.py @@ -0,0 +1,700 @@ +#!/usr/bin/env python +# coding=utf-8 +''' +Description : +Author : Azure-Tang +Date : 2024-07-25 11:25:24 +Version : 1.0.0 +LastEditors : Azure +LastEditTime : 2024-07-26 09:27:48 +Copyright (c) 2024 by KVCache.AI, All Rights Reserved. +''' + +import inspect +import math +from typing import List, Optional, Tuple, Union +import time +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from transformers.activations import ACT2FN +from transformers.cache_utils import Cache, DynamicCache, StaticCache +from transformers.modeling_attn_mask_utils import ( + AttentionMaskConverter, +) +from transformers.modeling_outputs import ( + MoeCausalLMOutputWithPast, + MoeModelOutputWithPast, + SequenceClassifierOutputWithPast, + TokenClassifierOutput, +) +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import ( + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_2_available, + is_flash_attn_greater_or_equal_2_10, + logging, + replace_return_docstrings, +) +from ktransformers.models.modeling_qwen2_moe import Qwen2MoeSparseMoeBlock, Qwen2MoeMLP, Qwen2MoeDecoderLayer +from ktransformers.models.modeling_deepseek import BaseModelOutputWithPast, DeepseekV2DecoderLayer, DeepseekV2MoE +from transformers.models.qwen2_moe.configuration_qwen2_moe import Qwen2MoeConfig +from ktransformers.operators.base_operator import BaseInjectedModule +from ktransformers.util.utils import InferenceState + +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters) + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "Qwen/Qwen1.5-MoE-A2.7B" +_CONFIG_FOR_DOC = "Qwen2MoeConfig" + +QWEN2MOE_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`Qwen2MoeConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +@add_start_docstrings( + "The bare Qwen2MoE Model outputting raw hidden-states without any specific head on top.", + QWEN2MOE_START_DOCSTRING, +) +class Qwen2MoePreTrainedModel(PreTrainedModel): + config_class = Qwen2MoeConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["Qwen2MoeDecoderLayer"] + _skip_keys_device_placement = "past_key_values" + _supports_flash_attn_2 = True + _supports_sdpa = True + _supports_cache_class = True + _supports_static_cache = True + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +QWEN2MOE_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): + Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` + returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. 
+ + Two formats are allowed: + - a [`~cache_utils.Cache`] instance; + - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy + cache format. + + The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the + legacy cache format will be returned. + + If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't + have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` + of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + output_router_logits (`bool`, *optional*): + Whether or not to return the logits of all the routers. They are useful for computing the router loss, and + should not be returned during inference. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): + Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`, + this tensor is not affected by padding. It is used to update the cache in the correct position and to infer + the complete sequence length. +""" + +from ktransformers.util.custom_gguf import GGUFLoader +from transformers.configuration_utils import PretrainedConfig +@add_start_docstrings( + "The bare Qwen2MoE Model outputting raw hidden-states without any specific head on top.", + QWEN2MOE_START_DOCSTRING, +) +class Qwen2MoeModelPerLayerPrefill(BaseInjectedModule): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`Qwen2MoeDecoderLayer`]
+
+ Args:
+ config: Qwen2MoeConfig
+ """
+ def __init__(
+ self,
+ key: str,
+ gguf_loader: GGUFLoader,
+ config: PretrainedConfig,
+ orig_module: nn.Module,
+ device: str = "cuda",
+ per_layer_prefill_intput_threshold: int = 30000, # if None or 0, per-layer prefill is disabled
+ **kwargs,
+ ):
+ BaseInjectedModule.__init__(self, key, gguf_loader, config, orig_module, device, **kwargs)
+ self.per_layer_prefill_intput_threshold = per_layer_prefill_intput_threshold
+
+ @add_start_docstrings_to_model_forward(QWEN2MOE_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_router_logits: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ per_layer_prefill_intput_threshold: int | None = None, # if None or 0, per-layer prefill is disabled
+ ) -> Union[Tuple, MoeModelOutputWithPast]:
+ # print(f'Total length of input_ids: {input_ids.size(1)}, {input_ids.size()}')
+
+ if per_layer_prefill_intput_threshold is None:
+ per_layer_prefill_intput_threshold = self.per_layer_prefill_intput_threshold
+ per_layer_prefill_flag = False
+ seq_length = inputs_embeds.size(1) if inputs_embeds is not None else input_ids.size(1)
+ if per_layer_prefill_intput_threshold and per_layer_prefill_intput_threshold < seq_length:
+ per_layer_prefill_flag = True
+ for layer in self.layers:
+ self.load_layer_to(layer, InferenceState.UNLOAD)
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_router_logits = (
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if (input_ids is None) ^ (inputs_embeds is not None):
+ raise ValueError(
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
+ )
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ use_legacy_cache = False
+ if use_cache and not isinstance(past_key_values, Cache):
+ use_legacy_cache = True
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
+ logger.warning_once(
+ "We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. 
" + "Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)" + ) + + if inputs_embeds is None: + input_ids = input_ids.to("cpu") + inputs_embeds = self.embed_tokens(input_ids) + inputs_embeds = inputs_embeds.to("cuda") + + if cache_position is None: + past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 + cache_position = torch.arange( + past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device + ) + if position_ids is None: + position_ids = cache_position.unsqueeze(0) + + causal_mask = self._update_causal_mask( + attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions + ) + + hidden_states = inputs_embeds + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_router_logits = () if output_router_logits else None + next_decoder_cache = None + + for decoder_layer in self.layers: + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + causal_mask, + position_ids, + past_key_values, + output_attentions, + output_router_logits, + use_cache, + cache_position, + ) + else: + if per_layer_prefill_flag: + # print(f"to gpu") + self.load_layer_to(decoder_layer, InferenceState.PREFILL) + torch.cuda.empty_cache() + layer_outputs = decoder_layer( + hidden_states, + attention_mask=causal_mask, + position_ids=position_ids, + past_key_value=past_key_values, + output_attentions=output_attentions, + output_router_logits=output_router_logits, + use_cache=use_cache, + cache_position=cache_position, + ) + if per_layer_prefill_flag: + # print(f"to cpu") + self.load_layer_to(decoder_layer, InferenceState.UNLOAD) + torch.cuda.empty_cache() + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache = layer_outputs[2 if output_attentions else 1] + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + if output_router_logits and layer_outputs[-1] is not None: + all_router_logits += (layer_outputs[-1],) + + hidden_states = self.norm(hidden_states) + + + if per_layer_prefill_flag: + per_layer_prefill_flag = False + for layer in self.layers: + self.load_layer_to(layer, InferenceState.GENERATE) + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = None + if use_cache: + next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache + + if not return_dict: + return tuple( + v + for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits] + if v is not None + ) + return MoeModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + router_logits=all_router_logits, + ) + + def load_layer_to(self, layer:Qwen2MoeDecoderLayer, target: InferenceState): + assert isinstance(layer, Qwen2MoeDecoderLayer), "module should be nn.ModuleList of decoder layers" + + # TODO Support restore to original device, not only cuda + device = "cpu" if target == InferenceState.UNLOAD else "cuda" + + # attn + layer.self_attn.q_proj.set_inference_mode(target) + layer.self_attn.k_proj.set_inference_mode(target) + layer.self_attn.v_proj.set_inference_mode(target) + layer.self_attn.o_proj.set_inference_mode(target) + 
layer.self_attn.rotary_emb = layer.self_attn.rotary_emb.to(device) + + # mlp + if isinstance(layer.mlp, Qwen2MoeSparseMoeBlock): + layer.mlp.gate.set_inference_mode(target) + layer.mlp.experts.set_inference_mode(target) + layer.mlp.shared_expert.gate_proj.set_inference_mode(target) + layer.mlp.shared_expert.up_proj.set_inference_mode(target) + layer.mlp.shared_expert.down_proj.set_inference_mode(target) + layer.mlp.shared_expert.act_fn.to(device) + layer.mlp.shared_expert_gate.to(device) + else: + layer.mlp.gate_proj.set_inference_mode(target) + layer.mlp.up_proj.set_inference_mode(target) + layer.mlp.down_proj.set_inference_mode(target) + layer.mlp.act_fn.to(device) + # layer norm + layer.input_layernorm.to(device) + layer.post_attention_layernorm.to(device) + + +DeepseekV2_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): + Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` + returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. + + Two formats are allowed: + - a [`~cache_utils.Cache`] instance; + - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy + cache format. + + The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the + legacy cache format will be returned. + + If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't + have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` + of shape `(batch_size, sequence_length)`. 
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class DeepseekV2ModelPerLayerPrefill(BaseInjectedModule):
+ """
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`DeepseekV2DecoderLayer`]
+
+ Args:
+ config: DeepseekV2Config
+ """
+ def __init__(
+ self,
+ key: str,
+ gguf_loader: GGUFLoader,
+ config: PretrainedConfig,
+ orig_module: nn.Module,
+ device: str = "cuda",
+ per_layer_prefill_intput_threshold: int = 30000, # if None or 0, per-layer prefill is disabled
+ **kwargs,
+ ):
+ BaseInjectedModule.__init__(self, key, gguf_loader, config, orig_module, device, **kwargs)
+ self.per_layer_prefill_intput_threshold = per_layer_prefill_intput_threshold
+
+ @add_start_docstrings_to_model_forward(DeepseekV2_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ per_layer_prefill_intput_threshold: int | None = None, # if None or 0, per-layer prefill is disabled
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+ if per_layer_prefill_intput_threshold is None:
+ per_layer_prefill_intput_threshold = self.per_layer_prefill_intput_threshold
+ per_layer_prefill_flag = False
+ seq_length = inputs_embeds.size(1) if inputs_embeds is not None else input_ids.size(1)
+ if per_layer_prefill_intput_threshold and per_layer_prefill_intput_threshold < seq_length:
+ per_layer_prefill_flag = True
+ for layer in self.layers:
+ self.load_layer_to(layer, InferenceState.UNLOAD)
+ torch.cuda.empty_cache()
+ output_attentions = (
+ output_attentions
+ if output_attentions is not None
+ else self.config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states
+ if output_hidden_states is not None
+ else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = (
+ return_dict if return_dict is not None else self.config.use_return_dict
+ )
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError(
+ "You cannot specify both input_ids and inputs_embeds at the same time"
+ )
+ elif input_ids is not None:
+ batch_size, seq_length = input_ids.shape[:2]
+ elif inputs_embeds is not None:
+ batch_size, seq_length = inputs_embeds.shape[:2]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ past_key_values_length = 0
+ if use_cache:
+ use_legacy_cache = not isinstance(past_key_values, Cache)
+ if use_legacy_cache:
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
+
+ # compute inputs_embeds before cache_position, which needs its shape
+ if inputs_embeds is None:
+ org_device = input_ids.device
+ input_ids = input_ids.to("cpu")
+ inputs_embeds = self.embed_tokens(input_ids)
+ input_ids = input_ids.to(org_device)
+
+ if cache_position is None:
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+ cache_position = torch.arange(
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+ )
+
+ if position_ids is None:
+ position_ids = cache_position.unsqueeze(0)
+
+ causal_mask = self._update_causal_mask(
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
+ )
+
+ # embed positions
+ hidden_states = inputs_embeds
+ if per_layer_prefill_flag:
+ print(f'Total length of input_ids: {hidden_states.size(1)}')
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ next_decoder_cache = None
+
+ t_gpu = 0
+ t_cpu = 0
+ t_f = 0
+ t_start = time.time()
+
+ for decoder_layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ causal_mask,
+ position_ids,
+ past_key_values,
+ output_attentions,
+ use_cache,
+ cache_position,
+ )
+ else:
+ t3 = time.time()
+ if per_layer_prefill_flag:
+ # print(f"to gpu")
+ self.load_layer_to(decoder_layer, InferenceState.PREFILL)
+ torch.cuda.empty_cache()
+ t4 = time.time()
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=causal_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_values,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ )
+ t5 = time.time()
+ if per_layer_prefill_flag:
+ # print(f"to cpu")
+ self.load_layer_to(decoder_layer, InferenceState.UNLOAD)
+ torch.cuda.empty_cache()
+ t6 = time.time()
+ t_gpu += t4-t3
+ t_cpu += t6-t5
+ t_f += t5-t4
+
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ hidden_states = self.norm(hidden_states)
+
+ if per_layer_prefill_flag:
+ t6 = time.time()
+ # print(f"restore")
+ per_layer_prefill_flag = False
+ for layer in self.layers:
+ self.load_layer_to(layer, InferenceState.GENERATE)
+ torch.cuda.empty_cache()
+ t7 = time.time()
+
+ print(f"total time: {t7-t_start}, \n layer num: {len(self.layers)}, gpu load time: {t_gpu}, cpu offload time: {t_cpu}, forward time: {t_f}, restore time: {t7-t6}")
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = None
+ if use_cache:
+ next_cache = (
+ next_decoder_cache.to_legacy_cache()
+ if use_legacy_cache
+ else next_decoder_cache
+ )
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]
+ if v is not None
+ )
+ return BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
+
+ def load_layer_to(self, layer: DeepseekV2DecoderLayer, target: InferenceState):
+ assert isinstance(layer, DeepseekV2DecoderLayer), "layer should be a DeepseekV2DecoderLayer"
+
+ # TODO Support restore to original device, not only cuda
+ device = "cpu" if target == InferenceState.UNLOAD else "cuda"
+
+ # TODO Support DFS to automatically choose between .to() and set_inference_mode() based on the module type
+
+ # attn
+ layer.self_attn.to(device)
+
+ # mlp
+ if isinstance(layer.mlp, DeepseekV2MoE):
+ layer.mlp.gate.to(device)
+ layer.mlp.experts.set_inference_mode(target)
+ layer.mlp.shared_experts.gate_proj.set_inference_mode(target)
+ layer.mlp.shared_experts.up_proj.set_inference_mode(target)
+ layer.mlp.shared_experts.down_proj.set_inference_mode(target)
+ layer.mlp.shared_experts.act_fn.to(device)
+ else:
+ layer.mlp.gate_proj.set_inference_mode(target)
+ layer.mlp.up_proj.set_inference_mode(target)
+ layer.mlp.down_proj.set_inference_mode(target)
+ layer.mlp.act_fn.to(device)
+ # layer norm
+ layer.input_layernorm.to(device)
+ layer.post_attention_layernorm.to(device)
diff --git a/ktransformers/operators/linear.py b/ktransformers/operators/linear.py
new file mode 100644
index 0000000..e264323
--- /dev/null
+++ b/ktransformers/operators/linear.py
@@ -0,0 +1,340 @@
+#!/usr/bin/env python
+# coding=utf-8
+'''
+Description :
+Author : Azure-Tang, Boxin Zhang
+Date : 2024-07-25 11:25:24
+Version : 0.1.0
+LastEditors : Azure
+LastEditTime : 2024-07-26 09:27:53
+Copyright (c) 2024 by KVCache.AI, All Rights Reserved. 
+''' + + +import torch +from torch import nn +import KTransformersOps +from ktransformers.util.custom_gguf import GGUFLoader +from ktransformers.util.utils import InferenceState +from ktransformers.ktransformers_ext.operators.custom_marlin.quantize.utils.marlin_utils import ( + MarlinWorkspace, + marlin_quantize, + GPTQ_MARLIN_MIN_THREAD_N, + GPTQ_MARLIN_MAX_PARALLEL, +) +from ktransformers.operators.base_operator import BaseInjectedModule +from transformers.configuration_utils import PretrainedConfig +from abc import ABC, abstractmethod + + +#class QuantizedLinearBase(BaseInjectedModule, ABC): +class QuantizedLinearBase(ABC): + def __init__( + self, + key: str, + gguf_loader: GGUFLoader, + config: PretrainedConfig, + orig_module: nn.Module = None, + device: str = "cuda", + **kwargs, + ): + # super().__init__(key, gguf_loader, config, orig_module, device, **kwargs) + super().__init__() + self.key = key + self.gguf_loader = gguf_loader + self.device = device + self.config = config + + self.has_bias = False + self.dtype = torch.get_default_dtype() + if orig_module is not None: + self.in_features = orig_module.in_features + self.out_features = orig_module.out_features + else: + shape = self.gguf_loader.tensor_info[key + ".weight"]["shape"] + if len(shape) == 1: + print("Warning: orig_module is not set, but has in_features or out_features equals to 1, can't get in_features and out_features from GGUF") + self.in_features = self.gguf_loader.tensor_info[key + ".weight"]["shape"][0] + self.out_features = self.gguf_loader.tensor_info[key + ".weight"]["shape"][1] + + @abstractmethod + def forward(self, x: torch.Tensor) -> torch.Tensor: + pass + + def load_weight(self, override_key: str | None = None, device: str | None = None): + if override_key is not None: + keys = override_key + else: + keys = [self.key] + + for key in keys: + if key + ".weight" in self.gguf_loader.tensor_file_map: + if key + ".bias" in self.gguf_loader.tensor_file_map: + tensors = self.load_multi(key, ["weight", "bias"], device=device) + tensor = tensors["weight"] + bias = tensors["bias"] + # self.qtype = GGML_TYPE_QTYPE_MAP[tensorinfo[key + ".weight"]["ggml_type"]] + # print(torch.isinf(tensor).any(), torch.isinf(bias).any()) + return nn.Parameter(tensor), nn.Parameter(bias) + else: + tensors = self.load_multi(key, ["weight"], device=device) + tensor = tensors["weight"] + # self.qtype = GGML_TYPE_QTYPE_MAP[tensorinfo[key + ".weight"]["ggml_type"]] + return nn.Parameter(tensor) + else: + raise FileNotFoundError(f"Weight file not found for key {key}") + + def load_multi(self, key: str, keys: list[str], device: str = "cpu"): + tensors = {} + for k in keys: + tensors[k] = self.gguf_loader.load_gguf_tensor(key + "." 
+ k, device=device) + return tensors + + @abstractmethod + def load(self, w: dict | nn.Parameter | tuple | None = None, device: str|None = "cuda"): + pass + + @abstractmethod + def unload(self): + pass + + +class QuantizedLinearTorch(QuantizedLinearBase): + def __init__( + self, + key: str, + gguf_loader: GGUFLoader, + config: PretrainedConfig, + orig_module: nn.Module = None, + device: str = "cuda", + **kwargs, + ): + super().__init__(key, gguf_loader, config, orig_module, device, **kwargs) + self.has_bias = False + self.dtype = torch.get_default_dtype() + self.w = None + self.has_bias = False + + def forward(self, x: torch.Tensor) -> torch.Tensor: + dtype = x.dtype + out_device = x.device + x = x.to(device=self.device, dtype=self.dtype) + x = x @ self.w + if self.has_bias: + x = x + self.bias + x = x.to(dtype=dtype, device=out_device) + return x + + def load(self, w: dict | nn.Parameter | tuple | None = None, device: str|None = None): + if device is None: device = self.device + if w is None: w = self.load_weight(device=device) + + if isinstance(w, nn.Parameter): + self.w = w.to(dtype=self.dtype).view(self.out_features, self.in_features).T + self.has_bias = False + elif isinstance(w, tuple): + self.w = w[0].to(dtype=self.dtype).view(self.out_features, self.in_features).T + self.bias = w[1].to(dtype=self.dtype) + self.has_bias = True + else: + raise ValueError("Invalid weight type") + # self.linear = self.linear.to(device) + self.w = self.w.to(device) + if self.has_bias: + self.bias = self.bias.to(device) + + def unload(self): + if self.w is not None: + self.w = None + if self.has_bias: + self.bias = None + + +class QuantizedLinearMarlin(QuantizedLinearBase): + marlin_q_w: torch.Tensor + marlin_s: torch.Tensor + g_idx: torch.Tensor + sort_indices: torch.Tensor + has_bias: bool + def __init__( + self, + key: str, + gguf_loader: GGUFLoader, + config: PretrainedConfig, + orig_module: nn.Module = None, + device: str = "cuda", + num_bits: int = 4, # 4-bit/8-bit is supported + group_size: int = 64, # -1, 32, 64, 128 + act_order: bool = False, + is_k_full=True, + **kwargs, + ): + assert device.lower() != "cpu", "Marlin quantized linear only supports GPU device" + super().__init__(key, gguf_loader, config, orig_module, device, **kwargs) + self.num_bits = num_bits + self.group_size = group_size + self.act_order = act_order + self.is_k_full = is_k_full + + def load(self, w: dict | nn.Parameter | tuple | None = None, device: str|None = "cuda"): + if device is None: device = self.device + assert device.lower() != "cpu", "Marlin quantized linear only supports GPU device" + if w is None: w = self.load_weight(device=device) + + if isinstance(w, nn.Parameter): + # pad weight + weight = w.view(self.out_features, self.in_features).T + self.has_bias = False + elif isinstance(w, tuple): + w = list(w) + weight = w[0].view(self.out_features, self.in_features).T + self.bias = w[1] + self.has_bias = True + else: + raise ValueError("Invalid weight type") + weight = weight.to(device) + if self.has_bias: + self.bias = self.bias.to(device) + # Pack Marlin linear + w_ref, marlin_q_w, marlin_s, g_idx, sort_indices, _ = marlin_quantize( + weight, self.num_bits, self.group_size, self.act_order + ) + self.workspace = MarlinWorkspace( + self.out_features, GPTQ_MARLIN_MIN_THREAD_N, GPTQ_MARLIN_MAX_PARALLEL + ) + self.marlin_q_w = marlin_q_w + self.marlin_s = marlin_s + self.g_idx = g_idx + self.sort_indices = sort_indices + self.k = weight.shape[0] + self.n = weight.shape[1] + + def forward(self, x: torch.Tensor) -> 
torch.Tensor:
+ # Only BF16 and FP16 inputs are supported
+ x = x.to(self.device)
+ orig_shape = list(x.shape)
+ orig_dtype = x.dtype
+ x = x.reshape(-1, x.shape[-1])
+ marlin_s = self.marlin_s.to(x.dtype)
+ x = KTransformersOps.gptq_marlin_gemm(
+ x,
+ self.marlin_q_w,
+ marlin_s,
+ self.g_idx,
+ self.sort_indices,
+ self.workspace.scratch,
+ self.num_bits,
+ x.shape[0],
+ self.n,
+ x.shape[-1],
+ self.is_k_full,
+ )
+ if self.has_bias:
+ x = x + self.bias
+ orig_shape[-1] = self.n
+ return x.reshape(orig_shape).to(orig_dtype)
+
+ def unload(self):
+ if self.has_bias:
+ self.bias = None
+ self.marlin_q_w = None
+ self.marlin_s = None
+ self.g_idx = None
+ self.sort_indices = None
+ self.workspace = None
+
+LINEAR_MAP = {
+ "QuantizedLinearMarlin": QuantizedLinearMarlin,
+ "QuantizedLinearTorch": QuantizedLinearTorch,
+}
+
+class KTransformerLinear(BaseInjectedModule, QuantizedLinearBase):
+ def __init__(
+ self,
+ key: str,
+ gguf_loader: GGUFLoader,
+ config: PretrainedConfig,
+ orig_module: nn.Module,
+ device: str = "cuda",
+ generate_device: str = "cuda",
+ generate_op: str | None = "QuantizedLinearMarlin",
+ prefill_device: str = "cuda",
+ prefill_op: str | None = "QuantizedLinearTorch",
+ **kwargs,
+ ):
+ BaseInjectedModule.__init__(self, key, gguf_loader, config, orig_module, device, **kwargs)
+ QuantizedLinearBase.__init__(self, key, gguf_loader, config, orig_module, device, **kwargs)
+ # build all the linear operators
+ if prefill_op is not None:
+ assert prefill_op in LINEAR_MAP, f"linear_type {prefill_op} not supported"
+ if prefill_op == "QuantizedLinearMarlin" and (orig_module.in_features%GPTQ_MARLIN_MIN_THREAD_N!=0 or orig_module.out_features%GPTQ_MARLIN_MIN_THREAD_N!=0):
+ print(f"This linear module's in_features or out_features is not divisible by GPTQ_MARLIN_MIN_THREAD_N({GPTQ_MARLIN_MIN_THREAD_N}), using QuantizedLinearTorch instead.")
+ print(f"module info: key:{key} orig_module:{orig_module}")
+ self.prefill_linear = QuantizedLinearTorch(key, gguf_loader, config, orig_module, prefill_device, **kwargs)
+ else:
+ self.prefill_linear = LINEAR_MAP[prefill_op](key, gguf_loader, config, orig_module, prefill_device, **kwargs)
+ else:
+ self.prefill_linear = None
+
+ if generate_op is not None:
+ assert generate_op in LINEAR_MAP, f"linear_type {generate_op} not supported"
+ if generate_op == "QuantizedLinearMarlin" and (orig_module.in_features%GPTQ_MARLIN_MIN_THREAD_N!=0 or orig_module.out_features%GPTQ_MARLIN_MIN_THREAD_N!=0):
+ print(f"This linear module's in_features or out_features is not divisible by GPTQ_MARLIN_MIN_THREAD_N({GPTQ_MARLIN_MIN_THREAD_N}), using QuantizedLinearTorch instead.")
+ print(f"module info: key:{key} orig_module:{orig_module}")
+ self.generate_op = "QuantizedLinearTorch"
+ self.generate_linear = QuantizedLinearTorch(key, gguf_loader, config, orig_module, generate_device, **kwargs)
+ else:
+ self.generate_linear = LINEAR_MAP[generate_op](key, gguf_loader, config, orig_module, generate_device, **kwargs)
+ else:
+ self.generate_linear = None
+ self.device = device
+ self.mode = InferenceState.UNLOAD
+
+ def forward(self, x):
+ if self.mode == InferenceState.PREFILL:
+ assert self.prefill_linear is not None, "prefill linear is not initialized"
+ return self.prefill_linear.forward(x)
+ else:
+ assert self.generate_linear is not None, "generate linear is not initialized"
+ return self.generate_linear.forward(x)
+
+ def load(self, w: dict | nn.Parameter | tuple | None = None, mode: InferenceState = 
InferenceState.GENERATE): + if not mode: + mode = InferenceState.GENERATE + # load to device + if mode == InferenceState.PREFILL: + self.generate_linear.unload() + self.prefill_linear.load(w=w) + self.device = self.prefill_linear.device + elif mode == InferenceState.GENERATE: + self.prefill_linear.unload() + self.generate_linear.load(w=w) + self.device = self.generate_linear.device + elif mode == InferenceState.UNLOAD: + self.prefill_linear.unload() + self.generate_linear.unload() + self.device = "cpu" + else: + raise ValueError("mode must be either InferenceState.GENERATE, InferenceState.PREFILL or InferenceState.UNLOAD") + self.mode = mode + + def unload(self): + if self.prefill_linear is not None: + self.prefill_linear.unload() + if self.generate_linear is not None: + self.generate_linear.unload() + self.device = self.generate_linear.device + + def set_inference_mode(self, mode: InferenceState): + if not mode: + mode = InferenceState.GENERATE + if mode == InferenceState.GENERATE: + self.load(mode=InferenceState.GENERATE) + elif mode == InferenceState.PREFILL: + self.load(mode=InferenceState.PREFILL) + elif mode == InferenceState.UNLOAD: + self.unload() + else: + raise ValueError("mode must be either InferenceState.GENERATE, InferenceState.PREFILL or InferenceState.UNLOAD") diff --git a/ktransformers/optimize/optimize.py b/ktransformers/optimize/optimize.py new file mode 100644 index 0000000..7062166 --- /dev/null +++ b/ktransformers/optimize/optimize.py @@ -0,0 +1,102 @@ +''' +Description : +Author : Boxin Zhang +Version : 0.1.0 +Copyright (c) 2024 by KVCache.AI, All Rights Reserved. +''' +from typing import Mapping, List +import torch +import yaml +import re +from torch import nn +from transformers import AutoConfig +from transformers.configuration_utils import PretrainedConfig +# from operators import BaseInjectedModule +from ktransformers.util.custom_gguf import GGUFLoader, translate_name_to_gguf +from ktransformers.util.utils import set_module, load_weights +import itertools + +def inject(module, local_optimization_dict, model_config:AutoConfig ,gguf_loader:GGUFLoader, prefix=''): + for name, child in module._modules.items(): + if child is not None: + child_prefix = prefix + name + if child_prefix in local_optimization_dict: + inject_module_meta=local_optimization_dict[child_prefix] + if isinstance(inject_module_meta, Mapping): + import_path = inject_module_meta["class"].split(".") + import_module_name = ".".join(import_path[:-1]) + import_class_name = import_path[-1] + module_cls=getattr(__import__(import_module_name, fromlist=[""]), import_class_name) + print(f"Injecting {child_prefix} as", import_module_name, ".", import_class_name) + inject_module=module_cls(key = inject_module_meta["key"], gguf_loader = gguf_loader, config = model_config, orig_module=child, device = inject_module_meta["device"], **inject_module_meta["kwargs"]) + set_module(module, name, inject_module) + elif isinstance(inject_module_meta, str): + assert inject_module_meta=="default", "for str inject_module_meta, only support \"default\"." + else: + raise Exception("inject_module_meta must be a dict or str") + child_prefix += "." 
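+ # NOTE: an illustrative sketch of what a mapping entry in
+ # local_optimization_dict looks like (hypothetical values, produced by
+ # gen_optimize_config() below):
+ # {"key": "blk.0.attn_q", "class": "ktransformers.operators.linear.KTransformerLinear",
+ # "device": "cuda", "kwargs": {"generate_op": "QuantizedLinearMarlin"}}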
+ child_optimization_dict = {k: v for k, v in local_optimization_dict.items() if k.startswith(child_prefix)} + inject(child, child_optimization_dict, model_config, gguf_loader, child_prefix) + +def del_meta(module:nn.Module): + #print("default loading weights", prefix) + persistent_buffers = {k: v for k, v in module._buffers.items() if k not in module._non_persistent_buffers_set} + local_name_params = itertools.chain(module._parameters.items(), persistent_buffers.items()) + local_state = {k: v for k, v in local_name_params if v is not None} + for name, param in local_state.items(): + if param.device == "meta" or param.device == torch.device("meta"): + module.__delattr__(name) + for name, child in module._modules.items(): + del_meta(child) + +def gen_optimize_config(module: nn.Module, out_data: Mapping, rule_list: List, prefix: str="", default_device: str = "cuda:0"): + module_name = prefix[:-1] + translated_name = translate_name_to_gguf(prefix)[:-1] + #print("gen_optimize_config", prefix, module_name, translated_name) + recursive = True + for rule in rule_list: + #print(rule) + match_meta = rule["match"] + if "class" in match_meta: + import_path = match_meta["class"].split(".") + import_module_name = ".".join(import_path[:-1]) + import_class_name = import_path[-1] + module_cls=getattr(__import__(import_module_name, fromlist=[""]), import_class_name) + if not isinstance(module, module_cls): + continue + if "name" in match_meta: + if re.search(match_meta["name"], module_name) is None: + continue + replace_meta = rule["replace"] + out_data[module_name]={"key": translated_name, + "class": replace_meta["class"], + "device": replace_meta["device"] if "device" in replace_meta else default_device, + "kwargs": replace_meta["kwargs"] if "kwargs" in replace_meta else dict()} + if "recursive" in rule: + recursive = bool(rule["recursive"]) + + if module_name not in out_data: + out_data[module_name]="default" + + #print(out_data[module_name]) + #input() + + if recursive: + for name, child in module._modules.items(): + if child is not None: + child_prefix = prefix + name + "." 
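+ # NOTE: default_device is forwarded so nested matches inherit it; a rule
+ # with `recursive: False` (like the experts rules in the YAML files below)
+ # never reaches this call for its submodules.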
+ gen_optimize_config(child, out_data, rule_list, child_prefix, default_device)
+
+
+def optimize_and_load_gguf(module: nn.Module, rule_file: str, gguf_path: str, model_config: PretrainedConfig, default_device: str = "cuda:0"):
+ with open(rule_file, 'r', encoding='utf-8') as f:
+ rule_list = yaml.load(f.read(), Loader=yaml.FullLoader)
+
+ optimize_config = dict()
+ gen_optimize_config(module, optimize_config, rule_list, default_device = default_device)
+
+ gguf_loader = GGUFLoader(gguf_path)
+ with torch.device("meta"):
+ inject(module, optimize_config, model_config, gguf_loader)
+ load_weights(module, gguf_loader)
+ del_meta(module)
diff --git a/ktransformers/optimize/optimize_rules/DeepSeek-V2-Chat.yaml b/ktransformers/optimize/optimize_rules/DeepSeek-V2-Chat.yaml
new file mode 100644
index 0000000..025bd2b
--- /dev/null
+++ b/ktransformers/optimize/optimize_rules/DeepSeek-V2-Chat.yaml
@@ -0,0 +1,41 @@
+- match:
+ class: ktransformers.models.modeling_deepseek.DeepseekV2YarnRotaryEmbedding
+ replace:
+ class: ktransformers.operators.RoPE.YarnRotaryEmbedding
+- match:
+ name: "^model\\.layers\\.(?!.*self_attn).*$" # regular expression
+ class: torch.nn.Linear # only match modules matching name and class simultaneously
+ replace:
+ class: ktransformers.operators.linear.KTransformerLinear # optimized Kernel on quantized data types
+ kwargs:
+ generate_device: "cuda"
+ prefill_device: "cuda"
+ generate_op: "QuantizedLinearMarlin"
+ prefill_op: "QuantizedLinearTorch"
+- match:
+ name: "^model\\.layers\\..*\\.mlp$"
+ class: ktransformers.models.modeling_deepseek.DeepseekV2MoE
+ replace:
+ class: ktransformers.operators.experts.DeepseekV2MoEInjected # mlp module with custom forward function
+- match:
+ name: "^model\\.layers\\..*\\.mlp\\.experts$"
+ replace:
+ class: ktransformers.operators.experts.KTransformersMLPExpert # custom MoE Kernel with expert parallelism
+ device: "cpu" # which device to load this module to when initializing
+ kwargs:
+ prefill_device: "cuda"
+ prefill_mlp_type: "MLPExpertsTorch"
+ generate_device: "cpu"
+ generate_mlp_type: "MLPCPUExperts"
+ out_device: "cuda"
+ recursive: False # don't recursively inject submodules of this module
+- match:
+ name: "^model\\.layers\\..*\\.self_attn$"
+ replace:
+ class: ktransformers.operators.attention.DeepseekV2AttentionInjected # optimized MLA implementation
+- match:
+ name: "^model$"
+ replace:
+ class: "ktransformers.operators.layer_wise_prefill.DeepseekV2ModelPerLayerPrefill"
+ kwargs:
+ per_layer_prefill_intput_threshold: 0 # 0 disables layer-wise prefill
diff --git a/ktransformers/optimize/optimize_rules/Qwen2-57B-A14B-Instruct.yaml b/ktransformers/optimize/optimize_rules/Qwen2-57B-A14B-Instruct.yaml
new file mode 100644
index 0000000..2b4e312
--- /dev/null
+++ b/ktransformers/optimize/optimize_rules/Qwen2-57B-A14B-Instruct.yaml
@@ -0,0 +1,37 @@
+- match:
+ class: ktransformers.models.modeling_qwen2_moe.Qwen2MoeRotaryEmbedding
+ replace:
+ class: ktransformers.operators.RoPE.RotaryEmbedding
+- match:
+ name: "^model\\.layers\\..*$" # regular expression
+ class: torch.nn.Linear # only match modules matching name and class simultaneously
+ replace:
+ class: ktransformers.operators.linear.KTransformerLinear # optimized Kernel on quantized data types
+ kwargs:
+ generate_device: "cuda"
+ prefill_device: "cuda"
+ generate_op: "QuantizedLinearMarlin"
+ prefill_op: "QuantizedLinearTorch"
+- match:
+ name: "^model\\.layers\\..*\\.mlp$"
+ class: ktransformers.models.modeling_qwen2_moe.Qwen2MoeSparseMoeBlock
+ replace:
+ class: ktransformers.operators.experts.Qwen2MoeSparseMoeBlockInjected # mlp module with custom forward function
+- match:
+ name: "^model\\.layers\\..*\\.mlp\\.experts$"
+ replace:
+ class: ktransformers.operators.experts.KTransformersMLPExpert # custom MoE Kernel with expert parallelism
+ device: "cpu" # which device to load this module to when initializing
+ kwargs:
+ prefill_device: "cuda"
+ prefill_mlp_type: "MLPExpertsTorch"
+ generate_device: "cpu"
+ generate_mlp_type: "MLPCPUExperts"
+ out_device: "cuda"
+ recursive: False # don't recursively inject submodules of this module
+- match:
+ name: "^model$"
+ replace:
+ class: "ktransformers.operators.layer_wise_prefill.Qwen2MoeModelPerLayerPrefill"
+ kwargs:
+ per_layer_prefill_intput_threshold: 0 # 0 disables layer-wise prefill
diff --git a/ktransformers/server/__init__.py b/ktransformers/server/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/ktransformers/server/api/__init__.py b/ktransformers/server/api/__init__.py
new file mode 100644
index 0000000..d69bc0b
--- /dev/null
+++ b/ktransformers/server/api/__init__.py
@@ -0,0 +1,10 @@
+from fastapi import APIRouter
+
+from .ollama import router as ollama_router
+from .openai import router as openai_router, post_db_creation_operations
+from .web import router as web_router
+
+router = APIRouter()
+router.include_router(ollama_router)
+router.include_router(openai_router)
+router.include_router(web_router)
diff --git a/ktransformers/server/api/ollama/__init__.py b/ktransformers/server/api/ollama/__init__.py
new file mode 100644
index 0000000..7efab95
--- /dev/null
+++ b/ktransformers/server/api/ollama/__init__.py
@@ -0,0 +1,6 @@
+from fastapi import APIRouter
+
+from .completions import router as completions_router
+
+router = APIRouter()
+router.include_router(completions_router)
diff --git a/ktransformers/server/api/ollama/completions.py b/ktransformers/server/api/ollama/completions.py
new file mode 100644
index 0000000..d0eb346
--- /dev/null
+++ b/ktransformers/server/api/ollama/completions.py
@@ -0,0 +1,160 @@
+from datetime import datetime
+import json
+from time import time
+from uuid import uuid4
+from typing import List, Optional
+
+from fastapi import APIRouter, Request
+from pydantic import BaseModel, Field
+
+from ktransformers.server.config.config import Config
+from ktransformers.server.utils.create_interface import get_interface
+from ktransformers.server.schemas.assistants.streaming import check_link_response
+from ktransformers.server.backend.base import BackendInterfaceBase
+
+router = APIRouter(prefix='/api')
+
+
+# https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-completion
+class OllamaGenerateCompletionRequest(BaseModel):
+ model: str = Field(..., description="The model name, which is required.")
+ prompt: Optional[str] = Field(
+ None, description="The prompt to generate a response for.")
+ images: Optional[List[str]] = Field(
+ None, description="A list of base64-encoded images for multimodal models such as llava.")
+ # Advanced parameters
+ format: Optional[str] = Field(
+ None, description="The format to return a response in; the only accepted value is json.")
+ options: Optional[dict] = Field(
+ None, description="Additional model parameters as listed in the documentation.")
+ system: Optional[str] = Field(
+ None, description="System message to override what is defined in the Modelfile.")
+ template: Optional[str] = Field(
+ None, description="The prompt template to use, overriding what is defined in the Modelfile.")
Modelfile.") + context: Optional[str] = Field( + None, description="The context parameter from a previous request to keep a short conversational memory.") + stream: Optional[bool] = Field( + None, description="If false, the response will be returned as a single response object.") + raw: Optional[bool] = Field( + None, description="If true, no formatting will be applied to the prompt.") + keep_alive: Optional[str] = Field( + "5m", description="Controls how long the model will stay loaded into memory following the request.") + + +class OllamaGenerationStreamResponse(BaseModel): + model: str + created_at: str + response: str + done: bool = Field(...) + + +class OllamaGenerationResponse(BaseModel): + pass + + +@router.post("/generate", tags=['ollama']) +async def generate(request: Request, input: OllamaGenerateCompletionRequest): + id = str(uuid4()) + + interface: BackendInterfaceBase = get_interface() + print(f'COMPLETION INPUT:----\n{input.prompt}\n----') + + config = Config() + + if input.stream: + async def inner(): + async for token in interface.inference(input.prompt,id): + d = OllamaGenerationStreamResponse(model=config.model_name,created_at=str(datetime.now()),response=token,done=False) + yield d.model_dump_json()+'\n' + # d = {'model':config.model_name,'created_at':"", 'response':token,'done':False} + # yield f"{json.dumps(d)}\n" + # d = {'model':config.model_name,'created_at':"", 'response':'','done':True} + # yield f"{json.dumps(d)}\n" + d = OllamaGenerationStreamResponse(model=config.model_name,created_at=str(datetime.now()),response='',done=True) + yield d.model_dump_json()+'\n' + return check_link_response(request,inner()) + else: + raise NotImplementedError + +# https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-chat-completion + + +class OllamaChatCompletionRequest(BaseModel): + pass + + +class OllamaChatCompletionStreamResponse(BaseModel): + pass + + +class OllamaChatCompletionResponse(BaseModel): + pass + + +@router.post("/chat", tags=['ollama']) +async def chat(request: Request, input: OllamaChatCompletionRequest): + raise NotImplementedError + + +# https://github.com/ollama/ollama/blob/main/docs/api.md#list-local-models +class OllamaModel(BaseModel): + name: str + modified_at: str + size: int + # TODO: fill the rest correctly + + +# mock ollama +@router.get("/tags",tags=['ollama']) +async def tags(): + config = Config() + # TODO: fill this correctly, although it does not effect Tabby + return {"models": [OllamaModel(name=config.model_name, modified_at="123", size=123)]} + +class OllamaModelInfo(BaseModel): + # TODO: fill this correctly + pass + +class OllamaShowRequest(BaseModel): + name: str = Field(..., description="Name of the model to show") + verbose: Optional[bool] = Field( + None, description="If set to true, returns full data for verbose response fields") + +class OllamaShowDetial(BaseModel): + parent_model: str + format: str + family: str + families: List[str] + parameter_size: str + quantization_level: str + +class OllamaShowResponse(BaseModel): + modelfile: str + parameters: str + template: str + details: OllamaShowDetial + model_info: OllamaModelInfo + + + + +@router.post("/show", tags=['ollama']) +async def show(request: Request, input: OllamaShowRequest): + config = Config() + # TODO: Add more info in config to return, although it does not effect Tabby + return OllamaShowResponse( + modelfile = "# Modelfile generated by ...", + parameters = " ", + template = " ", + details = OllamaShowDetial( + parent_model = " ", + format = "gguf", + family = " 
", + families = [ + " " + ], + parameter_size = " ", + quantization_level = " " + ), + model_info = OllamaModelInfo() + ) \ No newline at end of file diff --git a/ktransformers/server/api/openai/__init__.py b/ktransformers/server/api/openai/__init__.py new file mode 100644 index 0000000..810b4f8 --- /dev/null +++ b/ktransformers/server/api/openai/__init__.py @@ -0,0 +1,15 @@ +from fastapi import APIRouter + +from .assistants import router as assistants_router,create_default_assistant +from .endpoints.chat import router as chat_router +from .legacy import router as legacy_router + +router = APIRouter(prefix='/v1') + + +router.include_router(assistants_router) +router.include_router(chat_router) +router.include_router(legacy_router) + +def post_db_creation_operations(): + create_default_assistant() diff --git a/ktransformers/server/api/openai/assistants/__init__.py b/ktransformers/server/api/openai/assistants/__init__.py new file mode 100644 index 0000000..a5b652f --- /dev/null +++ b/ktransformers/server/api/openai/assistants/__init__.py @@ -0,0 +1,14 @@ +from fastapi import APIRouter + +from .assistants import router as assistants_router, create_default_assistant +from .messages import router as messages_router +from .runs import router as runs_router +from .threads import router as threads_router + +router = APIRouter() + +threads_router.include_router(runs_router) +threads_router.include_router(messages_router) + +router.include_router(assistants_router) +router.include_router(threads_router) diff --git a/ktransformers/server/api/openai/assistants/assistants.py b/ktransformers/server/api/openai/assistants/assistants.py new file mode 100644 index 0000000..415c593 --- /dev/null +++ b/ktransformers/server/api/openai/assistants/assistants.py @@ -0,0 +1,103 @@ +from typing import Optional + +from fastapi import APIRouter +from fastapi.testclient import TestClient + +from ktransformers.server.crud.assistants.assistants import AssistantDatabaseManager +from ktransformers.server.crud.assistants.runs import RunsDatabaseManager +from ktransformers.server.schemas.assistants.assistants import AssistantCreate, AssistantModify, ObjectID, AssistantBuildStatus, AssistantObject +from ktransformers.server.schemas.base import DeleteResponse, Order +from ktransformers.server.config.log import logger + + +router = APIRouter(prefix="/assistants") +assistant_manager = AssistantDatabaseManager() +runs_manager = RunsDatabaseManager() + + +@router.post("/", tags=['openai']) +async def create_assistant( + assistant: AssistantCreate, +): + return assistant_manager.db_create_assistant(assistant).as_api_response() + + +@router.get("/", tags=['openai']) +async def list_assistants( + limit: Optional[int] = 20, + order: Order = Order.DESC, + after: Optional[str] = None, + before: Optional[str] = None, +): + return [assistant.as_api_response() for assistant in assistant_manager.db_list_assistants(limit, order)] + +# list assistant with status + + +@router.get("/status", tags=['openai-ext']) +async def list_assistants_with_status( + limit: Optional[int] = 20, + order: Order = Order.DESC, + after: Optional[str] = None, + before: Optional[str] = None, +): + return assistant_manager.db_list_assistants(limit, order) + + +@router.get("/{assistant_id}", tags=['openai']) +async def retrieve_assistant( + assistant_id: str, +): + return assistant_manager.db_get_assistant_by_id(assistant_id).as_api_response() + + +@router.post("/{assistant_id}", tags=['openai']) +async def modify_assistant( + assistant_id: str, + assistant: 
AssistantModify, +): + return assistant_manager.db_update_assistant_by_id(assistant_id, assistant).as_api_response() + + +@router.delete("/{assistant_id}", tags=['openai'], response_model=DeleteResponse) +async def delete_assistant(assistant_id: str): + assistant_manager.db_delete_assistant_by_id(assistant_id) + return DeleteResponse(id=assistant_id, object="assistant.deleted") + + +@router.get("/{assistant_id}/related_thread", tags=['openai']) +async def get_related_thread(assistant_id: ObjectID): + assistant = assistant_manager.db_get_assistant_by_id(assistant_id) + return assistant.get_related_threads_ids() + + +def create_default_assistant(): + logger.info('Creating default assistant') + if assistant_manager.db_count_assistants() == 0: + default_assistant = assistant_manager.db_create_assistant(AssistantCreate(name="KT Assistant", + model="default model", + instructions="""You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. """ + + """Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. """ + + """Please ensure that your responses are socially unbiased and positive in nature.""")) + default_assistant.build_status.status = AssistantBuildStatus.Status.completed + default_assistant.sync_db() + + +# unit test +client = TestClient(router) + + +def test_create_assistant(): + ass_create = AssistantCreate(model="awesome model", instructions="hello") + + res = client.post("/", json=ass_create.model_dump(mode="json")) + + assert res.status_code == 200 + assistant = AssistantObject.model_validate(res.json()) + + assert assistant.model == ass_create.model + assert assistant.instructions == ass_create.instructions + + res = client.get(f"/{assistant.id}") + ass1 = AssistantObject.model_validate(res.json()) + assert assistant == ass1 diff --git a/ktransformers/server/api/openai/assistants/messages.py b/ktransformers/server/api/openai/assistants/messages.py new file mode 100644 index 0000000..80c7569 --- /dev/null +++ b/ktransformers/server/api/openai/assistants/messages.py @@ -0,0 +1,54 @@ +from typing import List, Optional + +from fastapi import APIRouter + +from ktransformers.server.exceptions import not_implemented +from ktransformers.server.schemas.assistants.messages import MessageCreate, MessageObject, MessageModify +from ktransformers.server.crud.assistants.messages import MessageDatabaseManager +from ktransformers.server.schemas.base import DeleteResponse, ObjectID, Order +from ktransformers.server.backend.base import ThreadContext +from ktransformers.server.utils.create_interface import get_thread_context_manager +router = APIRouter() +message_manager = MessageDatabaseManager() + + +@router.post("/{thread_id}/messages", tags=['openai'], response_model=MessageObject) +async def create_message(thread_id: str, msg: MessageCreate): + message = message_manager.db_create_message( + thread_id, msg, MessageObject.Status.in_progress) + ctx: Optional[ThreadContext] = await get_thread_context_manager().get_context_by_thread_id(thread_id) + if ctx is not None: + ctx.put_user_message(message) + return message + + +@router.get("/{thread_id}/messages", tags=['openai'], response_model=List[MessageObject]) +async def list_messages( + thread_id: str, + limit: Optional[int] = 20, + order: Order = Order.DESC, + after: Optional[str] = None, + before: Optional[str] = None, + run_id: Optional[str] = None, +): + return message_manager.db_list_messages_of_thread(thread_id, limit, order) + + 
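+# NOTE: an illustrative usage sketch in the TestClient style of the unit test
+# in assistants.py; the MessageCreate payload below is an assumption, not the
+# actual schema:
+#
+# client = TestClient(router)
+# res = client.post("/th_123/messages", json={"role": "user", "content": "hi"})
+# msg = MessageObject.model_validate(res.json())
+# res = client.get("/th_123/messages")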
+@router.get("/{thread_id}/messages/{message_id}", tags=['openai'], response_model=MessageObject) +async def retrieve_message(thread_id: ObjectID, message_id: ObjectID): + return message_manager.db_get_message_by_id(thread_id, message_id) + + +@router.post("/{thread_id}/messages/{message_id}", tags=['openai'], response_model=MessageObject) +async def modify_message(thread_id: ObjectID, message_id: ObjectID, msg: MessageModify): + #raise not_implemented('modify message not implemented') + raise not_implemented('modify message') + + +@router.delete("/{thread_id}/messages/{message_id}", tags=['openai'], response_model=DeleteResponse) +async def delete_message(thread_id: ObjectID, message_id: ObjectID): + ctx: Optional[ThreadContext] = await get_thread_context_manager().get_context_by_thread_id(thread_id) + if ctx is not None: + ctx.delete_user_message(message_id) + message_manager.db_delete_message_by_id(thread_id, message_id) + return DeleteResponse(id=message_id, object='thread.message.deleted') diff --git a/ktransformers/server/api/openai/assistants/runs.py b/ktransformers/server/api/openai/assistants/runs.py new file mode 100644 index 0000000..f5e51f9 --- /dev/null +++ b/ktransformers/server/api/openai/assistants/runs.py @@ -0,0 +1,99 @@ +from typing import List, Optional + +from fastapi import APIRouter, Request + +from ktransformers.server.crud.assistants.runs import RunsDatabaseManager +from ktransformers.server.backend.base import ThreadContext +from ktransformers.server.schemas.assistants.runs import RunCreate,RunObject,RunThreadCreate,RunModify,RunSubmit +from ktransformers.server.schemas.assistants.streaming import api_stream_response +from ktransformers.server.utils.create_interface import get_thread_context_manager +from ktransformers.server.schemas.base import Order +from ktransformers.server.config.log import logger +from ktransformers.server.exceptions import internal_server_error + + +router = APIRouter() +runs_manager = RunsDatabaseManager() + + +@router.post("/{thread_id}/runs",tags=['openai']) +async def create_run(request: Request, thread_id: str, run_create: RunCreate): + if run_create.stream: + async def inner(): + run = runs_manager.db_create_run(thread_id, run_create) + yield run.stream_response_with_event(event=RunObject.Status.created) + + ctx: ThreadContext = await get_thread_context_manager().get_context_by_run_object(run) + + async for event in ctx.work(): + yield event + return api_stream_response(request, inner()) + else: + run = runs_manager.db_create_run(thread_id, run_create) + ctx: ThreadContext = await get_thread_context_manager().get_context_by_run_object(run) + async for event in ctx.work(): + pass + return run + + +@router.post("/runs",tags=['openai'], response_model=RunObject) +async def create_thread_and_run(run_thread: RunThreadCreate): + raise NotImplementedError + + +@router.get("/{thread_id}/runs",tags=['openai'], response_model=List[RunObject]) +async def list_runs( + thread_id: str, + limit: Optional[int] = 20, + order: Optional[Order] = Order.DESC, + after: Optional[str] = None, + before: Optional[str] = None, +): + raise NotImplementedError + + +@router.get("/{thread_id}/runs/{run_id}",tags=['openai'], response_model=RunObject) +async def retrieve_run( + thread_id: str, + run_id: str, +): + runobj= runs_manager.db_get_run(run_id) + assert runobj.thread_id == thread_id + return runobj + + + +@router.post("/{thread_id}/runs/{run_id}",tags=['openai'], response_model=RunObject) +async def modify_run( + thread_id: str, + run_id: str, + run: 
RunModify, +): + raise NotImplementedError + + +@router.post("/{thread_id}/runs/{run_id}/submit_tool_outputs", tags=['openai'],response_model=RunObject) +async def submit_tool_outputs_to_run(thread_id: str, run_id: str, submit: RunSubmit): + raise NotImplementedError + + +@router.post("/{thread_id}/runs/{run_id}/cancel",tags=['openai'], response_model=RunObject) +async def cancel_run(thread_id: str, run_id: str): + ctx: ThreadContext = await get_thread_context_manager().get_context_by_thread_id(thread_id) + if ctx is not None: + if ctx.run is None: + logger.warn(f'Run {ctx.run.id} is expected to be in_progress, but no context is found') + raise internal_server_error('ctx do not have run') + + if ctx.run.id == run_id: + logger.info(f'Cancelling thread: {thread_id} and run: {run_id}') + ctx.run.stream_response_with_event(RunObject.Status.cancelling) + return ctx.run + else: + run = runs_manager.db_get_run(run_id) + logger.info(f'Run {run_id} not in this thread context') + return run + else: + run = runs_manager.db_get_run(run_id) + logger.info(f'Run {run_id} not in context manager') + return run diff --git a/ktransformers/server/api/openai/assistants/threads.py b/ktransformers/server/api/openai/assistants/threads.py new file mode 100644 index 0000000..684a0ad --- /dev/null +++ b/ktransformers/server/api/openai/assistants/threads.py @@ -0,0 +1,36 @@ +from typing import List,Optional +from fastapi import APIRouter + +from ktransformers.server.crud.assistants.threads import ThreadsDatabaseManager,Order,ObjectID +from ktransformers.server.schemas.assistants.threads import ThreadObject,ThreadCreate,ThreadModify +from ktransformers.server.schemas.base import DeleteResponse +from ktransformers.server.schemas.conversation import ThreadPreview + +router = APIRouter(prefix='/threads') +threads_manager = ThreadsDatabaseManager() + + +@router.post("/",tags=['openai'], response_model=ThreadObject) +async def create_thread(thread: ThreadCreate): + return threads_manager.db_create_thread(thread) + + +@router.get("/", tags=['openai-ext'],response_model=List[ThreadPreview]) +async def list_threads(limit: Optional[int] = 20, order: Order = Order.DESC): + return threads_manager.db_list_threads_preview(limit, order) + + +@router.get("/{thread_id}",tags=['openai'], response_model=ThreadObject) +async def retrieve_thread(thread_id: ObjectID): + return threads_manager.db_get_thread_by_id(thread_id) + + +@router.post("/{thread_id}",tags=['openai'], response_model=ThreadObject) +async def modify_thread(thread_id: ObjectID, thread: ThreadModify): + raise NotImplementedError + + +@router.delete("/{thread_id}",tags=['openai'], response_model=DeleteResponse) +async def delete_thread(thread_id: ObjectID): + threads_manager.db_delete_thread_by_id(thread_id=thread_id) + return DeleteResponse(id=thread_id, object='thread.deleted') diff --git a/ktransformers/server/api/openai/endpoints/__init__.py b/ktransformers/server/api/openai/endpoints/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ktransformers/server/api/openai/endpoints/chat.py b/ktransformers/server/api/openai/endpoints/chat.py new file mode 100644 index 0000000..8fb7de9 --- /dev/null +++ b/ktransformers/server/api/openai/endpoints/chat.py @@ -0,0 +1,34 @@ +import json +from time import time +from uuid import uuid4 +from fastapi import APIRouter +from fastapi.requests import Request +from ktransformers.server.utils.create_interface import get_interface +from ktransformers.server.schemas.assistants.streaming import chat_stream_response +from 
ktransformers.server.schemas.endpoints.chat import ChatCompletionCreate,ChatCompletionChunk,ChatCompletionObject +from ktransformers.server.backend.base import BackendInterfaceBase + +router = APIRouter() + + +@router.post('/chat/completions',tags=['openai']) +async def chat_completion(request:Request,create:ChatCompletionCreate): + id = str(uuid4()) + + interface: BackendInterfaceBase = get_interface() + # input_ids = interface.format_and_tokenize_input_ids(id,messages=create.get_tokenizer_messages()) + + input_message = [json.loads(m.model_dump_json()) for m in create.messages] + + if create.stream: + async def inner(): + chunk = ChatCompletionChunk(id=id,object='chat.completion.chunk',created=int(time())) + async for token in interface.inference(input_message,id): + chunk.set_token(token) + yield chunk + return chat_stream_response(request,inner()) + else: + comp = ChatCompletionObject(id=id,object='chat.completion.chunk',created=int(time())) + async for token in interface.inference(input_message,id): + comp.append_token(token) + return comp diff --git a/ktransformers/server/api/openai/legacy/__init__.py b/ktransformers/server/api/openai/legacy/__init__.py new file mode 100644 index 0000000..08039a2 --- /dev/null +++ b/ktransformers/server/api/openai/legacy/__init__.py @@ -0,0 +1,6 @@ +from fastapi import APIRouter + +from . import completions + +router = APIRouter() +router.include_router(completions.router) \ No newline at end of file diff --git a/ktransformers/server/api/openai/legacy/completions.py b/ktransformers/server/api/openai/legacy/completions.py new file mode 100644 index 0000000..be85a29 --- /dev/null +++ b/ktransformers/server/api/openai/legacy/completions.py @@ -0,0 +1,33 @@ +import json +from time import time +from uuid import uuid4 +from fastapi import APIRouter +from fastapi.requests import Request +from ktransformers.server.utils.create_interface import get_interface +from ktransformers.server.schemas.assistants.streaming import stream_response +from ktransformers.server.schemas.legacy.completions import CompletionCreate,CompletionObject + +router = APIRouter() + +@router.post("/completions",tags=['openai']) +async def create_completion(request:Request,create:CompletionCreate): + id = str(uuid4()) + + interface = get_interface() + print(f'COMPLETION INPUT:----\n{create.prompt}\n----') + + + + if create.stream: + async def inner(): + async for token in interface.inference(create.prompt,id): + d = {'choices':[{'delta':{'content':token}}]} + yield f"data:{json.dumps(d)}\n\n" + d = {'choices':[{'delta':{'content':''},'finish_reason':''}]} + yield f"data:{json.dumps(d)}\n\n" + return stream_response(request,inner()) + else: + comp = CompletionObject(id=id,object='text_completion',created=int(time())) + async for token in interface.inference(create.prompt,id): + comp.append_token(token) + return comp diff --git a/ktransformers/server/api/web/__init__.py b/ktransformers/server/api/web/__init__.py new file mode 100644 index 0000000..befa721 --- /dev/null +++ b/ktransformers/server/api/web/__init__.py @@ -0,0 +1,6 @@ +from fastapi import APIRouter +from .system import router as system_router + + +router = APIRouter() +router.include_router(system_router) diff --git a/ktransformers/server/api/web/system.py b/ktransformers/server/api/web/system.py new file mode 100644 index 0000000..78e27a7 --- /dev/null +++ b/ktransformers/server/api/web/system.py @@ -0,0 +1,9 @@ +from fastapi import APIRouter + + +router = APIRouter() + + +@router.get('/system-info',tags=['web']) +def 
system_info(): + raise NotImplementedError diff --git a/ktransformers/server/backend/__init__.py b/ktransformers/server/backend/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ktransformers/server/backend/args.py b/ktransformers/server/backend/args.py new file mode 100644 index 0000000..e16e914 --- /dev/null +++ b/ktransformers/server/backend/args.py @@ -0,0 +1,97 @@ +from pydantic import BaseModel,Field +from typing import Optional +from ktransformers.server.config.config import Config + + +class ConfigArgs(BaseModel): + model_name : Optional[str] = Field(..., description="Model name") + model_dir: Optional[str] = Field(..., description="Path to model directory") + optimize_config_path: Optional[str] = Field('./KTransformers/optimize_config/DeepSeek-V2-Chat.json', description="Path of your optimize config json file") + gguf_path: Optional[str] = Field('/models/DeepSeek-Coder-V2-Instruct-GGUF/DeepSeek-Coder-V2-Instruct-Q4_K_M.gguf', description="Path of your gguf file") + class Config: + protected_namespaces = () + + paged : bool = Field(True,description='Wether to use paged attention kv cache') + + # total_context: int = Field(16384, description="Total number of tokens to allocate space for. This is not the max_seq_len supported by the model but the total to distribute dynamically over however many jobs are active at once") + total_context: int = Field(2**18, description="Total number of tokens to allocate space for. This is not the max_seq_len supported by the model but the total to distribute dynamically over however many jobs are active at once") + max_batch_size: int = Field(20 if paged else 1, description="Max number of batches to run at once, assuming the sequences will fit within total_context") + max_chunk_size: int = Field(2048, description="Max chunk size. Determines the size of prefill operations. Can be reduced to reduce pauses whenever a new job is started, but at the expense of overall prompt ingestion speed") + max_new_tokens: int = Field(500, description="Max new tokens per completion. For this example applies to all jobs") + json_mode: bool = Field(False, description="Use LMFE to constrain the output to JSON format. 
See schema and details below") + healing: bool = Field(False, description="Demonstrate token healing") + ban_strings: Optional[list] = Field(None, description="Ban some phrases maybe") + + gpu_split: Optional[str] = Field(None, description='"auto", or VRAM allocation per GPU in GB') + length: Optional[int] = Field(None, description="Maximum sequence length") + rope_scale: Optional[float] = Field(None, description="RoPE scaling factor") + rope_alpha: Optional[float] = Field(None, description="RoPE alpha value (NTK)") + no_flash_attn: bool = Field(False, description="Disable Flash Attention") + low_mem: bool = Field( + False, + description="Enable VRAM optimizations, potentially trading off speed", + ) + experts_per_token: Optional[int] = Field( + None, + description="Override MoE model's default number of experts per token", + ) + load_q4: bool = Field(False, description="Load weights in Q4 mode") + fast_safetensors: bool = Field( + False, + description="Optimized safetensors loading with direct I/O (experimental!)", + ) + draft_model_dir: Optional[str] = Field(None, description="Path to draft model directory") + no_draft_scale: bool = Field( + False, + description="If draft model has smaller context size than model, don't apply alpha (NTK) scaling to extend it", + ) + modes: bool = Field(False, description="List available modes and exit.") + mode: str = Field( + "llama", + description="Chat mode. Use llama for Llama 1/2 chat finetunes.", + ) + username: str = Field("User", description="Username when using raw chat mode") + botname: str = Field("Chatbort", description="Bot name when using raw chat mode") + system_prompt: Optional[str] = Field(None, description="Use custom system prompt") + temperature: float = Field(0.95, description="Sampler temperature, default = 0.95 (1 to disable)") + smoothing_factor: float = Field(0.0, description="Smoothing Factor, default = 0.0 (0 to disable)") + dynamic_temperature: Optional[str] = Field( + None, + description="Dynamic temperature min,max,exponent, e.g. 
-dyntemp 0.2,1.5,1", + ) + top_k: int = Field(50, description="Sampler top-K, default = 50 (0 to disable)") + top_p: float = Field(0.8, description="Sampler top-P, default = 0.8 (0 to disable)") + top_a: float = Field(0.0, description="Sampler top-A, default = 0.0 (0 to disable)") + skew: float = Field(0.0, description="Skew sampling, default = 0.0 (0 to disable)") + typical: float = Field( + 0.0, + description="Sampler typical threshold, default = 0.0 (0 to disable)", + ) + repetition_penalty: float = Field( + 1.01, + description="Sampler repetition penalty, default = 1.01 (1 to disable)", + ) + frequency_penalty: float = Field( + 0.0, + description="Sampler frequency penalty, default = 0.0 (0 to disable)", + ) + presence_penalty: float = Field( + 0.0, + description="Sampler presence penalty, default = 0.0 (0 to disable)", + ) + max_response_tokens: int = Field(300, description="Max tokens per response, default = 1000") + response_chunk: int = Field(250, description="Space to reserve in context for reply, default = 250") + no_code_formatting: bool = Field(False, description="Disable code formatting/syntax highlighting") + cache_8bit: bool = Field(False, description="Use 8-bit (FP8) cache") + cache_q4: bool = Field(True, description="Use Q4 cache") + ngram_decoding: bool = Field(False, description="Use n-gram speculative decoding") + print_timings: bool = Field(False, description="Output timings after each prompt") + amnesia: bool = Field(False, description="Forget context after every response") + + # for transformers + batch_size :int = Field(1,description="Batch Size") + cache_lens:int = Field(4096, description="Cache lens for transformers static cache") + device:str = Field('cuda:2',description="device") + +cfg = Config() +default_args = ConfigArgs(model_name=cfg.model_name,model_dir=cfg.model_path) diff --git a/ktransformers/server/backend/base.py b/ktransformers/server/backend/base.py new file mode 100644 index 0000000..4cbcdfa --- /dev/null +++ b/ktransformers/server/backend/base.py @@ -0,0 +1,162 @@ +from asyncio import Queue +from enum import Enum +import sys, os +from typing import AsyncIterator, Dict, List, Optional, Tuple + +import torch + +from ktransformers.server.config.log import logger +from ktransformers.server.crud.assistants.assistants import AssistantDatabaseManager +from ktransformers.server.crud.assistants.messages import MessageDatabaseManager +from ktransformers.server.crud.assistants.runs import RunsDatabaseManager +from ktransformers.server.crud.assistants.threads import ThreadsDatabaseManager +from ktransformers.server.exceptions import request_error +from ktransformers.server.schemas.assistants.assistants import AssistantObject +from ktransformers.server.schemas.assistants.messages import MessageCreate, MessageObject, Role +from ktransformers.server.schemas.assistants.runs import RunObject +from ktransformers.server.schemas.assistants.threads import ThreadObject +from ktransformers.server.schemas.base import ObjectID, Order +from ktransformers.server.utils.multi_timer import Profiler + + +from .args import ConfigArgs,default_args + + + +class BackendInterfaceBase: + ''' + Interface to inference frameworks. e.g. transformers, exllama. 
+ Implement __init__ and work + ''' + + args: ConfigArgs + profiler:Profiler = Profiler() + + def __init__(self, args:ConfigArgs = default_args): + raise NotImplementedError + + + async def inference(self,local_messages,request_unique_id:Optional[str])->AsyncIterator[str]: + ''' + work can be called directly, or by ThreadContext + + local_messages: + When called by ThreadContext, local_messages are generated by ThreadContext.get_local_messages(). + Please deal with different local_messages + request_unique_id: + unique id of different requests, useful when using cache + + return: + async str output for stream update + + ''' + raise NotImplementedError + + + def report_last_time_performance(self): + try: + tokenize_time = self.profiler.get_timer_sec('tokenize') + prefill_time = self.profiler.get_timer_sec('prefill') + decode_time = self.profiler.get_timer_sec('decode') + prefill_count = self.profiler.get_counter('prefill') + decode_count = self.profiler.get_counter('decode') + + logger.info(f'Performance(T/s): prefill {prefill_count/prefill_time}, decode {decode_count/decode_time}. Time(s): tokenize {tokenize_time}, prefill {prefill_time}, decode {decode_time}') + except: + logger.info(f'Performance statistics not recorded') + + +class ThreadContext: + ''' + A thread context holding assistant logics + + ''' + + args: ConfigArgs + # Assistant Logic + assistant: Optional[AssistantObject] = None + related_threads : List[ThreadObject] + thread: ThreadObject + messages: List[MessageObject] = [] + run: RunObject + + interface: Optional[BackendInterfaceBase] = None + + queue: Optional[Queue] = None + timer: Profiler = Profiler() + + def __init__(self, run: RunObject,interface:BackendInterfaceBase, args: ConfigArgs = default_args) -> None: + self.args = args + self.thread_manager = ThreadsDatabaseManager() + self.message_manager = MessageDatabaseManager() + self.runs_manager = RunsDatabaseManager() + self.assistant_manager = AssistantDatabaseManager() + self.thread = self.thread_manager.db_get_thread_by_id(run.thread_id) + self.assistant = self.assistant_manager.db_get_assistant_by_id(run.assistant_id) + self.messages = self.message_manager.db_list_messages_of_thread(run.thread_id,order=Order.ASC) + logger.debug(f"{len(self.messages)} messages loaded from database") + self.interface = interface + self.update_by_run(run,args) + + def get_local_messages(self): + ''' + Get local messages, as the input to interface.work + This function is intended to message preprocess e.g. 
apply chat template + ''' + raise NotImplementedError + + def update_by_run(self,run:RunObject,args:ConfigArgs = default_args): + self.run = run + self.args = args + + def put_user_message(self, message: MessageObject): + assert ( + message.role.is_user() and message.thread_id == self.thread.id and message.status == MessageObject.Status.in_progress + ) + self.messages.append(message) + + def delete_user_message(self,message_id: ObjectID): + self.messages = [m for m in self.messages if m.id != message_id] + + async def work(self)->AsyncIterator: + logger.debug('start working') + user_message = self.messages[-1] + if not user_message.role.is_user(): + raise request_error('user must talk before LLM can talk') + user_message.status = MessageObject.Status.completed + user_message.sync_db() + + local_messages = self.get_local_messages() # must get this before we interseted reply_message + + + response_str_count = 0 + reply_message = self.message_manager.create_message_object( + self.thread.id, + self.run.id, + MessageCreate(role=Role.assistant, content=""), + ) + reply_message.assistant_id = self.assistant.id + self.messages.append(reply_message) + + yield reply_message.stream_response_with_event(MessageObject.Status.created) + yield reply_message.stream_response_with_event(MessageObject.Status.in_progress) + yield self.run.stream_response_with_event(RunObject.Status.in_progress) + + async for token in self.interface.inference(local_messages,self.thread.id): + if self.run.status == RunObject.Status.cancelling: + logger.warn(f'Run {self.run.id} cancelling') + break + yield reply_message.append_message_delta(token) + response_str_count+=1 + + if self.run.status == RunObject.Status.cancelling: + yield self.run.stream_response_with_event(RunObject.Status.cancelled) + yield reply_message.stream_response_with_event(MessageObject.Status.incomplete) + elif self.run.status == RunObject.Status.in_progress: + yield self.run.stream_response_with_event(RunObject.Status.completed) + yield reply_message.stream_response_with_event(MessageObject.Status.completed) + else: + raise NotImplementedError(f'{self.run.status} should not appear here') + + reply_message.sync_db() + self.run.sync_db() \ No newline at end of file diff --git a/ktransformers/server/backend/context_manager.py b/ktransformers/server/backend/context_manager.py new file mode 100644 index 0000000..f18e3cf --- /dev/null +++ b/ktransformers/server/backend/context_manager.py @@ -0,0 +1,54 @@ +from asyncio import Lock +from typing import Dict, Optional + +from ktransformers.server.backend.base import ThreadContext, BackendInterfaceBase +from ktransformers.server.schemas.assistants.runs import RunObject +from ktransformers.server.schemas.base import ObjectID +from ktransformers.server.config.log import logger +from ktransformers.server.backend.interfaces.transformers import TransformersThreadContext +from ktransformers.server.backend.interfaces.ktransformers import KTransformersThreadContext +from ktransformers.server.backend.interfaces.exllamav2 import ExllamaThreadContext + +from ktransformers.server.backend.interfaces.exllamav2 import ExllamaInterface +from ktransformers.server.backend.interfaces.transformers import TransformersInterface +from ktransformers.server.backend.interfaces.ktransformers import KTransformersInterface +class ThreadContextManager: + lock: Lock + threads_context: Dict[ObjectID, ThreadContext] + interface: BackendInterfaceBase + + def __init__(self,interface) -> None: + logger.debug(f"Creating Context Manager") + self.lock = 
Lock() + self.threads_context = {} + self.interface = interface + pass + + async def get_context_by_run_object(self, run: RunObject) -> ThreadContext: + async with self.lock: + logger.debug(f"keys {self.threads_context.keys()}") + if run.thread_id not in self.threads_context: + logger.debug(f"new inference context {run.thread_id}") + if isinstance(self.interface, ExllamaInterface): + new_context = ExllamaThreadContext(run, self.interface) + elif isinstance(self.interface, KTransformersInterface): + new_context = KTransformersThreadContext(run, self.interface) + elif isinstance(self.interface, TransformersInterface): + new_context = TransformersThreadContext(run, self.interface) + else: + raise NotImplementedError + self.threads_context[run.thread_id] = new_context + # self.threads_context[run.thread_id] = ExllamaInferenceContext(run) + re = self.threads_context[run.thread_id] + re.update_by_run(run) + return re + + async def get_context_by_thread_id(self, thread_id: ObjectID) -> Optional[ThreadContext]: + async with self.lock: + if thread_id in self.threads_context: + logger.debug(f'found context for thread {thread_id}') + return self.threads_context[thread_id] + else: + logger.debug(f'no context for thread {thread_id}') + return None + \ No newline at end of file diff --git a/ktransformers/server/backend/interfaces/__init__.py b/ktransformers/server/backend/interfaces/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ktransformers/server/backend/interfaces/exllamav2.py b/ktransformers/server/backend/interfaces/exllamav2.py new file mode 100644 index 0000000..6385264 --- /dev/null +++ b/ktransformers/server/backend/interfaces/exllamav2.py @@ -0,0 +1,40 @@ +import sys, os +from typing import AsyncIterator, Dict, Tuple + +import torch + +from ..args import ConfigArgs, default_args + +from ..base import BackendInterfaceBase, ThreadContext +from ktransformers.server.schemas.assistants.runs import RunObject + + +from ..args import * + +class ExllamaThreadContext(ThreadContext): + def __init__(self, run: RunObject, args: ConfigArgs = default_args) -> None: + super().__init__(run,args) + + def get_interface(self): + return + + def get_local_messages(self): + raise NotImplementedError + + + + +class ExllamaInterface(BackendInterfaceBase): + + def __init__(self, args: ConfigArgs = ...): + raise NotImplementedError + + def tokenize_prompt(self, prompt: str) -> torch.Tensor: + raise NotImplementedError + + async def inference(self,local_messages,request_unique_id:Optional[str])->AsyncIterator: + raise NotImplementedError + + + + diff --git a/ktransformers/server/backend/interfaces/ktransformers.py b/ktransformers/server/backend/interfaces/ktransformers.py new file mode 100644 index 0000000..77b0cda --- /dev/null +++ b/ktransformers/server/backend/interfaces/ktransformers.py @@ -0,0 +1,78 @@ +import torch +from transformers import AutoTokenizer, AutoConfig, GenerationConfig +from ktransformers.server.backend.interfaces.transformers import TransformersInterface,ConfigArgs, TransformersThreadContext,default_args,TextStreamer +from ktransformers.server.config.log import logger +from ktransformers.optimize.optimize import optimize_and_load_gguf +from ktransformers.models.custom_cache import StaticCache +from ktransformers.util.cuda_graph_runner import CUDAGraphRunner +from ktransformers.local_chat import custom_models, default_optimize_rules + + +class KTransformersThreadContext(TransformersThreadContext): + pass + + +class KTransformersInterface(TransformersInterface): + def 
__init__(self,args:ConfigArgs= default_args): + self.args = args + torch.set_default_dtype(torch.bfloat16) + torch.set_grad_enabled(False) + self.tokenizer = AutoTokenizer.from_pretrained(args.model_dir,device = args.device) + config=AutoConfig.from_pretrained(args.model_dir, trust_remote_code=True) + if config.architectures[0] == "Qwen2MoeForCausalLM": + config._attn_implementation="flash_attention_2" + + with torch.device("meta"): + self.model=custom_models[config.architectures[0]](config) + + optimize_rule_path = default_optimize_rules[config.architectures[0]] + + # print(optimize_config) + + gguf_path = args.gguf_path + if gguf_path is None: + gguf_path = input( + "please input the path of your gguf file(gguf file in the dir containing input gguf file must all belong to current model):" + ) + optimize_and_load_gguf(self.model, optimize_rule_path, gguf_path, config) + + + + logger.info(f'{args.model_name} loaded from {args.model_dir} to {args.device}') + self.cache = StaticCache(config=self.model.config, max_batch_size=args.batch_size, max_cache_len=args.cache_lens, device=args.device, dtype=self.model.dtype) + logger.info(f'StaticCache (length={args.cache_lens}) created at {args.device}, batch size:{args.batch_size}') + self.model.generation_config = GenerationConfig.from_pretrained(args.model_dir) + if self.model.generation_config.pad_token_id is None: + self.model.generation_config.pad_token_id = self.model.generation_config.eos_token_id + self.streamer = TextStreamer(self.tokenizer) + + def decode_one_tokens(self): + if not hasattr(self, "cuda_graph_runner"): + self.cuda_graph_runner = CUDAGraphRunner() + self.cuda_graph_runner.capture(self.model, self.current_ids, self.active_cache_position.unsqueeze(0), self.active_cache_position, self.cache, return_dict=False, use_cache=True) + + if hasattr(self, "cuda_graph_runner"): + logits = self.cuda_graph_runner(self.current_ids, self.active_cache_position.unsqueeze(0), self.active_cache_position) + self.cache.change_seq_length(1) + torch.cuda.synchronize() + logits = logits[0,-1,:] + return self.logits_to_token(logits) + + if self.use_static_cache: + mask = torch.ones((1,self.seq_length)).to(self.args.device) + logits = self.model( + self.current_ids, + cache_position=self.active_cache_position, + past_key_values=self.cache, + attention_mask=mask, + return_dict=False, + use_cache=True + )[0] + else: + logits = self.model( + self.current_ids, + return_dict=False + )[0] + logits = logits[0,-1,:] + + return self.logits_to_token(logits) diff --git a/ktransformers/server/backend/interfaces/transformers.py b/ktransformers/server/backend/interfaces/transformers.py new file mode 100644 index 0000000..2c4779d --- /dev/null +++ b/ktransformers/server/backend/interfaces/transformers.py @@ -0,0 +1,337 @@ +from typing import Any, List, Optional, Set +from transformers import LlamaTokenizer,AutoTokenizer, AutoConfig, LlamaForCausalLM,GenerationConfig, StaticCache, AutoModelForCausalLM,BitsAndBytesConfig + +from ktransformers.server.schemas.base import ObjectID +from ktransformers.server.utils.multi_timer import Profiler +import torch +import sys, os +from ..base import ThreadContext,BackendInterfaceBase +from ktransformers.server.config.log import logger +from ..args import ConfigArgs,default_args + + + +# This TextStreamer is a modified version from https://github.com/huggingface/transformers/blob/main/src/transformers/generation/streamers.py +class TextStreamer: + + def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, 
**decode_kwargs):
+        self.tokenizer = tokenizer
+        self.skip_prompt = skip_prompt
+        self.decode_kwargs = decode_kwargs
+
+        # variables used in the streaming process
+        self.token_cache = []
+        self.print_len = 0
+        self.next_tokens_are_prompt = True
+
+    def reset(self):
+        self.token_cache = []
+        self.print_len = 0
+
+    def put(self, value) -> Optional[str]:
+        """
+        Receives tokens, decodes them, and returns the decoded text as soon as it forms entire words.
+        """
+        if not isinstance(value, int):
+            raise ValueError("TextStreamer only supports batch size 1, and int type input")
+
+        if self.skip_prompt and self.next_tokens_are_prompt:
+            self.next_tokens_are_prompt = False
+            return None
+
+        # Add the new token to the cache and decode the entire thing.
+        self.token_cache.append(value)
+        text = self.tokenizer.decode(self.token_cache, skip_special_tokens=True, **self.decode_kwargs)
+
+        # After the symbol for a new line, we flush the cache.
+        if text.endswith("\n"):
+            printable_text = text[self.print_len :]
+            self.reset()
+        # If the last token is a CJK character, we return the characters.
+        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
+            printable_text = text[self.print_len :]
+            self.print_len += len(printable_text)
+        # Otherwise, return up to the last space char (simple heuristic to avoid returning incomplete words,
+        # which may change with the subsequent token -- there are probably smarter ways to do this!)
+        else:
+            printable_text = text[self.print_len : text.rfind(" ") + 1]
+            self.print_len += len(printable_text)
+        return printable_text
+
+    def end(self) -> Optional[str]:
+        """Flushes any remaining cache and returns the leftover text."""
+        # Flush the cache, if it exists
+        if len(self.token_cache) > 0:
+            text = self.tokenizer.decode(self.token_cache, skip_special_tokens=True, **self.decode_kwargs)
+            printable_text = text[self.print_len :]
+            self.reset()
+        else:
+            printable_text = ""
+
+        self.next_tokens_are_prompt = True
+        return printable_text
+
+    def _is_chinese_char(self, cp):
+        """Checks whether CP is the codepoint of a CJK character."""
+        # This defines a "chinese character" as anything in the CJK Unicode block:
+        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+        #
+        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+        # despite its name. The modern Korean Hangul alphabet is a different block,
+        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+        # space-separated words, so they are not treated specially and handled
+        # like all of the other languages.
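+        # (For reference, the ranges tested below are the CJK Unified Ideographs
+        # block, CJK Extensions A through E, and the CJK Compatibility
+        # Ideographs block plus its supplement.)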
+ if ( + (cp >= 0x4E00 and cp <= 0x9FFF) + or (cp >= 0x3400 and cp <= 0x4DBF) # + or (cp >= 0x20000 and cp <= 0x2A6DF) # + or (cp >= 0x2A700 and cp <= 0x2B73F) # + or (cp >= 0x2B740 and cp <= 0x2B81F) # + or (cp >= 0x2B820 and cp <= 0x2CEAF) # + or (cp >= 0xF900 and cp <= 0xFAFF) + or (cp >= 0x2F800 and cp <= 0x2FA1F) # + ): # + return True + + return False + + +class TransformersThreadContext(ThreadContext): + def get_local_messages(self): + local_messages = [] + for m in self.messages: + local_messages.append( + {'role':m.role.value, + 'content':m.get_text_content()} + ) + + return local_messages + + +class TransformersInterface(BackendInterfaceBase): + use_static_cache : bool = True + + + model: Any + tokenizer: AutoTokenizer + + cache: StaticCache + generated_ids:torch.Tensor + seq_length:int + + streamer: TextStreamer + + # thread_related + last_request_id: Optional[str] = None + ever_generated_ids: Set[int] = set() + + + + def __init__(self, args:ConfigArgs = default_args): + self.args = args + + self.tokenizer = AutoTokenizer.from_pretrained(args.model_dir) + self.model = AutoModelForCausalLM.from_pretrained(args.model_dir, device_map=args.device,use_safetensors=True) + logger.info(f'{args.model_name} loaded from {args.model_dir} to {args.device}') + + self.cache = StaticCache(config=self.model.config, max_batch_size=args.batch_size, max_cache_len=args.cache_lens, device=args.device, dtype=self.model.dtype) + logger.info(f'StaticCache (length={args.cache_lens}) created at {args.device}, batch size:{args.batch_size}') + + self.streamer = TextStreamer(self.tokenizer) + + + + @property + def current_ids(self): + return self.generated_ids[:,self.seq_length-1].unsqueeze(1) + + @property + def active_cache_position(self): + return torch.tensor([self.seq_length-1], device=self.args.device) + + + def tokenize_prompt(self,prompt:str): + input_ids = self.tokenizer.encode(prompt,return_tensors='pt').to(self.args.device) + return input_ids + + def format_and_tokenize_input_ids(self,thread_id:ObjectID,messages:List): + for m in messages: + if m['role']=='system': + logger.warn(f'change {m["role"]} to user') + m['role'] = 'user' + + new_messages = [messages[0]] + for m in messages[1:]: + if m['role'] == 'user' and new_messages[-1]['role']=='user': + logger.warn('merge two adjacent user messages') + new_messages[-1]['content']+=m['content'] + else: + new_messages.append(m) + + + input_ids = self.tokenizer.apply_chat_template(new_messages,return_tensors='pt',add_generation_prompt=True).to(self.args.device) + + if (self.last_request_id is not None) and self.last_request_id == thread_id: + x = self.generated_ids[:,:self.seq_length] + y = input_ids[:,:self.seq_length] + # We can only hope that the input_ids are the same + unequal_mask = torch.ne(x,y) + unequal_positions = torch.nonzero(unequal_mask) + num_unequal_elements = unequal_mask.sum().item() + logger.warn(f'num_unequal_elements: {num_unequal_elements}') + + input_ids = input_ids[:,self.seq_length:] + logger.debug(f'get input ids of shape {input_ids.shape}') + return input_ids + + def append_new_tokens(self,new_tokens:int)->Optional[str]: + self.generated_ids[0,self.seq_length] = new_tokens + self.seq_length+=1 + return self.streamer.put(new_tokens) + + def logits_to_token(self,logits:torch.Tensor): + logits = logits/self.args.temperature + + for token_idx in self.ever_generated_ids: + if logits[token_idx] < 0: + logits[token_idx] *= self.args.repetition_penalty + else: + logits[token_idx] /= self.args.repetition_penalty + + probs = 
torch.nn.functional.softmax(logits, dim=-1) + + sample = True + if sample: + last = torch.multinomial(probs, num_samples=1) + else: + _, last = torch.topk(probs, k=1, dim=-1) + + last = last.item() + self.ever_generated_ids.add(last) + return last + + + + def decode_one_tokens(self): + if self.use_static_cache: + mask = torch.ones((1,self.seq_length)).to(self.args.device) + logits = self.model( + self.current_ids, + cache_position=self.active_cache_position, + past_key_values=self.cache, + attention_mask=mask, + return_dict=False, + use_cache=True + )[0] + else: + logits = self.model( + self.current_ids, + return_dict=False + )[0] + logits = logits[0,-1,:] + + return self.logits_to_token(logits) + + @torch.no_grad + def prefill(self,input_ids:torch.Tensor,is_new:bool): + input_ids_length = input_ids.shape[-1] + self.profiler.set_counter('prefill',input_ids_length) + logger.debug(f'input_ids: {input_ids.shape}') + + + if is_new: + self.cache.reset() + self.ever_generated_ids.clear() + former_seq_length = 0 + self.seq_length = input_ids_length + self.generated_ids = torch.zeros( + self.args.batch_size, self.seq_length + self.args.max_new_tokens + 1, dtype=torch.int, device=self.args.device + ) + else: + logger.debug(f'generate_ids: {self.generated_ids.shape}') + former_seq_length = self.seq_length + self.seq_length += input_ids_length + expected_length = self.seq_length + self.args.max_new_tokens+1 + delta_length = expected_length - self.generated_ids.shape[-1] + if delta_length>0: + new_generate_ids = torch.zeros( + self.args.batch_size, delta_length, dtype=torch.int, device=self.args.device + ) + self.generated_ids = torch.cat([self.generated_ids,new_generate_ids],dim=-1) + logger.debug(f'cache position: {former_seq_length} to {self.seq_length}') + cache_position = torch.arange(former_seq_length,self.seq_length, device=self.args.device) + self.generated_ids[:,cache_position] = input_ids.to(self.args.device).to(torch.int) + + mask = torch.ones((1,self.seq_length)).to(self.args.device) + device = input_ids.device + if not(type(self) is TransformersInterface): + input_ids = input_ids.to("cpu") + inputs_embeds = self.model.model.embed_tokens(input_ids).to(device) + if self.use_static_cache: + logits = self.model( + inputs_embeds=inputs_embeds, cache_position=cache_position, past_key_values=self.cache,return_dict=False, use_cache=True,attention_mask=mask + )[0] + else: + logits = self.model( + inputs_embeds=inputs_embeds,return_dict=False + )[0] + + + + next_token = self.logits_to_token(logits[0,-1,:]) + yield self.append_new_tokens(next_token) + + @torch.no_grad + def generate(self): + self.profiler.set_counter('decode',0) + for _ in range(1, self.args.max_new_tokens): + with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_mem_efficient=False, enable_math=True): + next_token = self.decode_one_tokens() + self.profiler.inc('decode') + if next_token == self.tokenizer.eos_token_id: + assert self.args.batch_size == 1 + break + yield self.append_new_tokens(next_token) + yield self.streamer.end() + + def check_is_new(self,thread_id:str): + if not self.use_static_cache: + return True + if self.last_request_id is None: + self.last_request_id = thread_id + return True + else: + if self.last_request_id==thread_id: + return False + else: + self.last_request_id = thread_id + return True + + async def inference(self,local_messages,thread_id:str): + self.profiler.create_and_start_timer('tokenize') + if isinstance(local_messages,List): + input_ids = 
self.format_and_tokenize_input_ids(thread_id,local_messages) + elif isinstance(local_messages,str): + input_ids = self.tokenize_prompt(local_messages) + else: + raise ValueError('local_messages should be List or str') + + self.profiler.pause_timer('tokenize') + + self.profiler.create_and_start_timer('prefill') + for t in self.prefill(input_ids,self.check_is_new(thread_id)): + if t is not None: + print(t,end='') + yield t + self.profiler.pause_timer('prefill') + + self.profiler.create_and_start_timer('decode') + for t in self.generate(): + if t is not None: + print(t,end='') + yield t + print('') + self.profiler.pause_timer('decode') + self.report_last_time_performance() + diff --git a/ktransformers/server/config/config.py b/ktransformers/server/config/config.py new file mode 100644 index 0000000..e17d215 --- /dev/null +++ b/ktransformers/server/config/config.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python +# coding=utf-8 +''' +Description : +Author : unicornchan +Date : 2024-06-11 16:35:42 +Version : 1.0.0 +LastEditors : chenxl +LastEditTime : 2024-07-27 01:55:42 +''' +import os +import yaml + +from ktransformers.server.config.singleton import Singleton + + +class Config(metaclass=Singleton): + """Singleton pattern Config class, used to get all configurations. + """ + CONFIG_FILE_NAME = "config.yaml" + + @staticmethod + def load() -> dict: + """load config file + + Returns: + dict: all configs + """ + base_path: str = os.path.dirname( + os.path.dirname(os.path.dirname(__file__))) + config_yaml: str = os.path.join( + base_path, "configs", Config.CONFIG_FILE_NAME) + if not os.path.exists(config_yaml): + print(f"Can't find config file, {config_yaml}") + exit(-1) + with open(config_yaml, 'r', encoding="utf-8") as fp: + config = yaml.safe_load(fp) + return config + + @staticmethod + def to_path(path: str) -> str: + """ + process file path + """ + base_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) + real_path = path if os.path.isabs( + path) else os.path.join(base_path, path) + return real_path + + def __init__(self): + cfg = Config.load() + self.base_path = os.path.dirname( + os.path.dirname(os.path.dirname(__file__))) + # log configs + self.log_dir = os.path.join(self.base_path, Config.to_path(cfg["log"]["dir"])) + self.log_file = cfg["log"]["file"] + self.log_level = cfg["log"]["level"] + self.backup_count = cfg["log"]["backup_count"] + + # server configs + self.server: dict = cfg.get("server",{}) + self.server_ip = self.server.get("ip", "0.0.0.0") + self.server_port = self.server.get("port", 9016) + + # db configs + self.db_configs: dict = cfg.get("db", {}) + self.db_type = self.db_configs.get("type", "") + self.db_host = os.path.join(self.base_path, self.db_configs.get("host", "")) + self.db_port = self.db_configs.get("port", "") + self.db_name = self.db_configs.get("database", "") + self.db_pool_size = self.db_configs.get("pool_size") + self.db_database = self.db_configs.get("database", "") + + # user config + self.user_config: dict = cfg.get("user", {}) + self.user_secret_key = self.user_config.get("secret_key", "") + self.user_algorithm = self.user_config.get("algorithm", "") + + # model config + self.model:dict = cfg.get("model", {}) + self.backend_type: str = self.model.get("type", "transformers") + self.model_path: str = self.model.get("path", "") + self.model_name: str = self.model.get("name", "") + self.model_device: str = self.model.get("device", "cuda:0") + self.gguf_path: str = self.model.get("gguf_path", "") + + # web config + self.web: dict = cfg.get("web", {}) 
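+        # The `web` section of config.yaml is optional; an illustrative sketch
+        # of the keys read below:
+        #   web:
+        #     open_cross_domain: true
+        #     mount: false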
+ self.web_cross_domain: bool = self.web.get("open_cross_domain", True) + self.mount_web: bool = self.web.get("mount", False) + + self.ext: dict = cfg.get("ext", {}) + self.cpu_infer = self.ext.get("cpu_infer", 10) diff --git a/ktransformers/server/config/log.py b/ktransformers/server/config/log.py new file mode 100644 index 0000000..1d594fe --- /dev/null +++ b/ktransformers/server/config/log.py @@ -0,0 +1,176 @@ +#!/usr/bin/env python +# coding=utf-8 +''' +Description : +Author : unicornchan +Date : 2024-06-12 02:48:39 +Version : 1.0.0 +LastEditors : chenxl +LastEditTime : 2024-07-27 01:55:50 +''' + +import codecs +import logging +import os +import re +import locale +from pathlib import Path +from logging.handlers import BaseRotatingHandler +import time +import colorlog + +from ktransformers.server.config.config import Config + + +class DailyRotatingFileHandler(BaseRotatingHandler): + """ + such as 'logging.TimeRotatingFileHandler', Additional features: + - support multiprocess + - support rotating daily + """ + + def __init__(self, filename, backupCount=0, encoding=None, delay=False, utc=False, **kwargs): # pylint: disable=unused-argument + self.backup_count = backupCount + self.utc = utc + self.suffix = "%Y-%m-%d" + self.base_log_path = Path(filename) + if not os.path.exists(self.base_log_path.parent): + os.makedirs(self.base_log_path.parent) + self.base_filename = self.base_log_path.name + self.current_filename = self._compute_fn() + self.current_log_path = self.base_log_path.with_name( + self.current_filename) + BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay) + + # pylint: disable=unused-argument, invalid-name + def shouldRollover(self, record): + """ + Determine whether to rotate the log. If the log filename corresponding to the current + time is not consistent with the currently opened log filename, then it is necessary + to rotate the log + Args: + record: record is not used, as we are just comparing times, but it is needed so + the method signatures are the same + """ + if self.current_filename != self._compute_fn(): + return True + return False + + def doRollover(self): + """ + roll over + """ + # close last log file + if self.stream: + self.stream.close() + self.stream = None # type: ignore + + # gen new log file name + self.current_filename = self._compute_fn() + self.current_log_path = self.base_log_path.with_name( + self.current_filename) + + if not self.delay: + self.stream = self._open() # type: ignore + + self.delete_expired_files() + + def _compute_fn(self): + """ + gen log file name + """ + return self.base_filename + "." + time.strftime(self.suffix, time.localtime()) + + def _open(self): + """ + open a new log file, create soft link + """ + if self.encoding is None: + stream = open(str(self.current_log_path), self.mode, encoding=locale.getpreferredencoding()) + else: + stream = codecs.open(str(self.current_log_path), self.mode, self.encoding) + + if self.base_log_path.exists(): + try: + if not self.base_log_path.is_symlink() or os.readlink(self.base_log_path) != self.current_filename: + os.remove(self.base_log_path) + except OSError: + pass + + try: + os.symlink(self.current_filename, str(self.base_log_path)) + except OSError: + pass + return stream + + def delete_expired_files(self): + """ + delete expired files every day + """ + if self.backup_count <= 0: + return + + file_names = os.listdir(str(self.base_log_path.parent)) + result = [] + prefix = self.base_filename + "." 
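+        # Rotated files are named "<base_filename>.<YYYY-MM-DD>"; collect only
+        # entries whose suffix matches that date pattern (optionally followed
+        # by an extra ".<ext>" suffix).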
+ plen = len(prefix) + for file_name in file_names: + if file_name[:plen] == prefix: + suffix = file_name[plen:] + if re.match(r"^\d{4}-\d{2}-\d{2}(\.\w+)?$", suffix): + result.append(file_name) + if len(result) < self.backup_count: + result = [] + else: + result.sort() + result = result[:len(result) - self.backup_count] + + for file_name in result: + os.remove(str(self.base_log_path.with_name(file_name))) + + +class Logger(object): + """ + logger class + """ + level_relations = { + 'debug': logging.DEBUG, + 'info': logging.INFO, + 'warn': logging.WARNING, + 'error': logging.ERROR, + 'crit': logging.CRITICAL + } + + def __init__(self, level: str = 'info'): + fmt = '%(asctime)s %(levelname)s %(pathname)s[%(lineno)d] %(funcName)s: %(message)s' + cfg: Config = Config() + filename: str = os.path.join(cfg.log_dir, cfg.log_file) + backup_count: int = cfg.backup_count + th = DailyRotatingFileHandler(filename=filename, when='MIDNIGHT', backupCount=backup_count, encoding="utf-8") + th.setFormatter(logging.Formatter(fmt)) + + + color_fmt = ( + '%(log_color)s%(asctime)s %(levelname)s %(pathname)s[%(lineno)d]: %(message)s' + ) + color_formatter = colorlog.ColoredFormatter( + color_fmt, + log_colors={ + 'DEBUG': 'cyan', + 'INFO': 'green', + 'WARNING': 'yellow', + 'ERROR': 'red', + 'CRITICAL': 'bold_red' + } + ) + + sh = logging.StreamHandler() + sh.setFormatter(color_formatter) + + self.logger = logging.getLogger(filename) + self.logger.setLevel(self.level_relations.get(level)) # type: ignore + self.logger.addHandler(th) + self.logger.addHandler(sh) + + +logger = Logger(level=Config().log_level).logger diff --git a/ktransformers/server/config/singleton.py b/ktransformers/server/config/singleton.py new file mode 100644 index 0000000..1af5d6c --- /dev/null +++ b/ktransformers/server/config/singleton.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python +# coding=utf-8 +''' +Description : Implement singleton +Author : unicornchan +Date : 2024-06-11 17:08:36 +Version : 1.0.0 +LastEditors : chenxl +LastEditTime : 2024-07-27 01:55:56 +''' +import abc + +class Singleton(abc.ABCMeta, type): + """_summary_ + + Args: + abc.ABCMeta: Provide a mechanism for defining abstract methods and properties, + enforcing subclasses to implement these methods and properties. + type: Inherit from 'type' to make 'Singleton' a metaclass, + enabling the implementation of the Singleton + """ + _instances = {} + + def __call__(cls, *args, **kwds): + if cls not in cls._instances: + cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwds) + return cls._instances[cls] + +class AbstractSingleton(abc.ABC, metaclass=Singleton): + """Provided an abstract Singleton base class, any class inheriting from + this base class will automatically become a Singleton class. + + Args: + abc.ABC: Abstract base class, it cannot be instantiated, only inherited. 
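+
+    A minimal usage sketch (illustrative; `AppState` is a hypothetical name):
+
+        class AppState(AbstractSingleton):
+            pass
+
+        assert AppState() is AppState()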
+ """ diff --git a/ktransformers/server/crud/__init__.py b/ktransformers/server/crud/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ktransformers/server/crud/assistants/__init__.py b/ktransformers/server/crud/assistants/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ktransformers/server/crud/assistants/assistants.py b/ktransformers/server/crud/assistants/assistants.py new file mode 100644 index 0000000..2d942c9 --- /dev/null +++ b/ktransformers/server/crud/assistants/assistants.py @@ -0,0 +1,66 @@ +from time import time +from typing import Optional,List +from uuid import uuid4 + +from ktransformers.server.models.assistants.assistants import Assistant +from ktransformers.server.schemas.assistants.assistants import AssistantCreate,AssistantObject,AssistantModify +from ktransformers.server.utils.sql_utils import SQLUtil +from ktransformers.server.config.log import logger +from ktransformers.server.schemas.base import Order + + +class AssistantDatabaseManager: + def __init__(self) -> None: + self.sql_util = SQLUtil() + + def create_assistant_object(self, assistant: AssistantCreate) -> AssistantObject: + assistant = AssistantObject( + **assistant.model_dump(mode='json'), + id=str(uuid4()), + object='assistant', + created_at=int(time()), + ) + return assistant + + def db_count_assistants(self) -> int: + with self.sql_util.get_db() as db: + return db.query(Assistant).count() + + def db_create_assistant(self, assistant: AssistantCreate): + ass_obj = self.create_assistant_object(assistant) + ass_obj.sync_db() + return ass_obj + + def db_list_assistants(self, limit: Optional[int], order: Order) -> List[AssistantObject]: + with self.sql_util.get_db() as db: + query = db.query(Assistant).order_by( + order.to_sqlalchemy_order()(Assistant.created_at)) + if limit is not None: + db_assistants = query.limit(limit) + else: + db_assistants = query.all() + return [AssistantObject.model_validate(a.__dict__) for a in db_assistants] + + def db_get_assistant_by_id(self, assistant_id: str) -> Optional[AssistantObject]: + with self.sql_util.get_db() as db: + db_assistant = db.query(Assistant).filter( + Assistant.id == assistant_id).first() + if db_assistant is None: + logger.debug(f"no assistant with id {str}") + return None + return AssistantObject.model_validate(db_assistant.__dict__) + + def db_update_assistant_by_id(self, assistant_id: str, assistant: AssistantModify): + with self.sql_util.get_db() as db: + db_assistant = db.query(Assistant).filter( + Assistant.id == assistant_id).first() + self.sql_util.db_update_commit_refresh(db, db_assistant, assistant) + return AssistantObject.model_validate(db_assistant.__dict__) + + def db_delete_assistant_by_id(self, assistant_id: str): + with self.sql_util.get_db() as db: + db_assistant = db.query(Assistant).filter( + Assistant.id == assistant_id).first() + db.delete(db_assistant) + db.commit() + diff --git a/ktransformers/server/crud/assistants/messages.py b/ktransformers/server/crud/assistants/messages.py new file mode 100644 index 0000000..779a6c8 --- /dev/null +++ b/ktransformers/server/crud/assistants/messages.py @@ -0,0 +1,86 @@ +from time import time +from typing import Optional +from uuid import uuid4 + +from ktransformers.server.models.assistants.messages import Message +from ktransformers.server.schemas.assistants.messages import MessageCore, MessageCreate, MessageObject +from ktransformers.server.schemas.base import Order,ObjectID +from ktransformers.server.utils.sql_utils import SQLUtil + +class 
MessageDatabaseManager: + def __init__(self) -> None: + self.sql_util = SQLUtil() + + @staticmethod + def create_db_message_by_core(message: MessageCore): + message_dict = message.model_dump(mode="json") + return Message(**message_dict, id=str(uuid4()), created_at=int(time())) + + def create_db_message(self, message: MessageCreate): + return MessageDatabaseManager.create_db_message_by_core(message.to_core()) + + def db_add_message(self, message: Message): + with self.sql_util.get_db() as db: + db.add(message) + self.sql_util.db_add_commit_refresh(db, message) + + def db_create_message(self, thread_id: str, message: MessageCreate, status: MessageObject.Status): + db_message = self.create_db_message(message) + db_message.status = status.value + db_message.thread_id = thread_id + self.db_add_message(db_message) + return MessageObject.model_validate(db_message.__dict__) + + @staticmethod + def create_message_object(thread_id: ObjectID, run_id: ObjectID, message: MessageCreate): + core = message.to_core() + return MessageObject( + **core.model_dump(mode='json'), + id=str(uuid4()), + object='thread.message', + created_at=int(time()), + thread_id=thread_id, + run_id=run_id, + status=MessageObject.Status.in_progress, + ) + + def db_sync_message(self, message: MessageObject): + db_message = Message( + **message.model_dump(mode="json"), + ) + with self.sql_util.get_db() as db: + self.sql_util.db_merge_commit(db, db_message) + + def db_list_messages_of_thread( + self, thread_id: str, limit: Optional[int] = None, order: Order = Order.DESC): + + # logger.debug( + # f"list messages of: {thread_id}, limit {limit}, order {order}") + with self.sql_util.get_db() as db: + query = ( + db.query(Message) + .filter(Message.thread_id == thread_id) + .order_by(order.to_sqlalchemy_order()(Message.created_at)) + ) + if limit is not None: + messages = query.limit(limit) + else: + messages = query.all() + message_list = [MessageObject.model_validate(m.__dict__) for m in messages] + return message_list + + def db_get_message_by_id(self, thread_id: ObjectID, message_id: ObjectID) -> MessageObject: + with self.sql_util.get_db() as db: + message = db.query(Message).filter( + Message.id == message_id).first() + assert message.thread_id == thread_id + message_info = MessageObject.model_validate(message.__dict__) + return message_info + + def db_delete_message_by_id(self, thread_id: ObjectID, message_id: ObjectID): + with self.sql_util.get_db() as db: + message = db.query(Message).filter( + Message.id == message_id).first() + assert message.thread_id == thread_id + db.delete(message) + db.commit() diff --git a/ktransformers/server/crud/assistants/runs.py b/ktransformers/server/crud/assistants/runs.py new file mode 100644 index 0000000..bdb884d --- /dev/null +++ b/ktransformers/server/crud/assistants/runs.py @@ -0,0 +1,50 @@ +from time import time +from uuid import uuid4 + +from ktransformers.server.models.assistants.runs import Run +from ktransformers.server.schemas.assistants.runs import RunCreate,RunObject +from ktransformers.server.schemas.base import ObjectID +from ktransformers.server.utils.sql_utils import SQLUtil + + +class RunsDatabaseManager: + def __init__(self) -> None: + self.sql_util = SQLUtil() + + def create_run_object(self, thread_id: ObjectID, run: RunCreate) -> RunObject: + run_obj = RunObject( + **run.model_dump(mode='json', exclude={"stream"}), + id=str(uuid4()), + object='run', + created_at=int(time()), + thread_id=thread_id, + status=RunObject.Status.queued, + ) + run_obj.set_compute_save(0) + return 
run_obj + + def db_create_run(self, thread_id: str, run: RunCreate): + db_run = Run( + **run.model_dump(mode="json", exclude={"stream"}), + id=str(uuid4()), + created_at=int(time()), + status="queued", + thread_id=thread_id, + ) + with self.sql_util.get_db() as db: + self.sql_util.db_add_commit_refresh(db, db_run) + run_obj = RunObject.model_validate(db_run.__dict__) + run_obj.set_compute_save(0) + return run_obj + + def db_sync_run(self, run: RunObject) -> None: + db_run = Run( + **run.model_dump(mode='json'), + ) + with self.sql_util.get_db() as db: + self.sql_util.db_merge_commit(db, db_run) + + def db_get_run(self, run_id: ObjectID) -> RunObject: + with self.sql_util.get_db() as db: + db_run = db.query(Run).filter(Run.id == run_id).first() + return RunObject.model_validate(db_run.__dict__) diff --git a/ktransformers/server/crud/assistants/threads.py b/ktransformers/server/crud/assistants/threads.py new file mode 100644 index 0000000..c87cb23 --- /dev/null +++ b/ktransformers/server/crud/assistants/threads.py @@ -0,0 +1,93 @@ +from time import time +from typing import Optional,List +from uuid import uuid4 + +from ktransformers.server.models.assistants.messages import Message +from ktransformers.server.models.assistants.threads import Thread +from ktransformers.server.schemas.assistants.threads import ThreadCreate,ThreadObject +from ktransformers.server.schemas.base import ObjectID, Order +from ktransformers.server.schemas.conversation import ThreadPreview +from ktransformers.server.utils.sql_utils import SQLUtil +from ktransformers.server.crud.assistants.messages import MessageDatabaseManager +from ktransformers.server.config.log import logger +from ktransformers.server.crud.assistants.assistants import AssistantDatabaseManager + +class ThreadsDatabaseManager: + def __init__(self) -> None: + self.sql_util = SQLUtil() + self.message_manager = MessageDatabaseManager() + self.assistant_maanager = AssistantDatabaseManager() + + def db_create_thread(self, thread: ThreadCreate): + thread_id = str(uuid4()) + db_messages = [] + with self.sql_util.get_db() as db: + if thread.messages is not None: + logger.debug("Creating messages first for thread") + for message in thread.messages: + db_message: Message = MessageDatabaseManager.create_db_message_by_core( + message) + db_message.role = "user" + db_message.thread_id = thread_id + db.add(db_message) + db_messages.append(db_message) + + db_thread = Thread( + **thread.model_dump(exclude="messages"), + id=str(uuid4()), + created_at=int(time()), + messages=db_messages, + ) + + self.sql_util.db_add_commit_refresh(db, db_thread) + thread_obj = ThreadObject.model_validate(db_thread.__dict__) + + if 'assistant_id' in thread.meta_data: +# assistant = self.assistant_maanager.db_get_assistant_by_id(thread.meta_data['assistant_id'], db) + assistant = self.assistant_maanager.db_get_assistant_by_id(thread.meta_data['assistant_id']) + logger.info( + f'Append this related thread to assistant {assistant.id}') + assistant.append_related_threads([thread_obj.id]) + assistant.sync_db(db) + return thread_obj + + def db_get_thread_by_id(self, thread_id: ObjectID): + with self.sql_util.get_db() as db: + db_thread = db.query(Thread).filter(Thread.id == thread_id).first() + return ThreadObject.model_validate(db_thread.__dict__) + + def db_list_threads(self, limit: Optional[int], order: Order) -> List[ThreadObject]: + with self.sql_util.get_db() as db: + query = db.query(Thread).order_by(order.to_sqlalchemy_order()( + 
Thread.created_at)).filter(~Thread.meta_data.contains('assistant_id')) + + if limit is not None: + db_threads = query.limit(limit) + else: + db_threads = query.all() + + return [ThreadObject.model_validate(tool.__dict__) for tool in db_threads] + + def db_list_threads_preview(self, limit: Optional[int], order: Order) -> List[ThreadPreview]: + threads = self.db_list_threads(limit, order) + previews = [] + for thread in threads: + messages = self.message_manager.db_list_messages_of_thread( + thread.id, limit=2, order=Order.ASC) + if len(messages) == 2: + message = messages[0] + assistant = self.assistant_maanager.db_get_assistant_by_id( + messages[1].assistant_id) + else: + message = None + assistant = None + previews.append(ThreadPreview( + assistant=assistant, thread=thread, first_message=message)) + return previews + + def db_delete_thread_by_id(self, thread_id: ObjectID): + with self.sql_util.get_db() as db: + db_thread = db.query(Thread).filter(Thread.id == thread_id).first() + db.delete(db_thread) + # TODO delete related messages and runs and other stuff or just gc + db.commit() diff --git a/ktransformers/server/exceptions.py b/ktransformers/server/exceptions.py new file mode 100644 index 0000000..059e99a --- /dev/null +++ b/ktransformers/server/exceptions.py @@ -0,0 +1,23 @@ +from fastapi import HTTPException, status + + +def db_exception(): + return HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="DB Error", + ) + + +def not_implemented(what): + return HTTPException( + status_code=status.HTTP_501_NOT_IMPLEMENTED, + detail=f"{what} not implemented", + ) + + +def internal_server_error(what): + return HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"{what}") + + +def request_error(what): + return HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"{what}") diff --git a/ktransformers/server/main.py b/ktransformers/server/main.py new file mode 100644 index 0000000..274a6bc --- /dev/null +++ b/ktransformers/server/main.py @@ -0,0 +1,143 @@ +import os +import re +from fastapi import FastAPI +from fastapi.staticfiles import StaticFiles +import uvicorn.logging +import argparse +import uvicorn +from fastapi.middleware.cors import CORSMiddleware +from ktransformers.server.config.config import Config +from ktransformers.server.utils.create_interface import create_interface +from ktransformers.server.backend.args import default_args +from fastapi.openapi.utils import get_openapi + +from fastapi import FastAPI +from fastapi.middleware.cors import CORSMiddleware + + +from ktransformers.server.api import router, post_db_creation_operations +from ktransformers.server.utils.sql_utils import Base, SQLUtil +from ktransformers.server.config.log import logger + + +def mount_app_routes(mount_app: FastAPI): + sql_util = SQLUtil() + logger.info("Creating SQL tables") + Base.metadata.create_all(bind=sql_util.sqlalchemy_engine) + post_db_creation_operations() + mount_app.include_router(router) + + +def create_app(): + cfg = Config() + app = FastAPI() + if Config().web_cross_domain: + app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], + ) + mount_app_routes(app) + if cfg.mount_web: + mount_index_routes(app) + return app + +def update_web_port(config_file: str): + ip_port_pattern = r"(localhost|((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)):[0-9]{1,5}" + with open(config_file, "r", encoding="utf-8") as f_cfg: + web_config = 
f_cfg.read() + ip_port = "localhost:" + str(Config().server_port) + new_web_config = re.sub(ip_port_pattern, ip_port, web_config) + with open(config_file, "w", encoding="utf-8") as f_cfg: + f_cfg.write(new_web_config) + + +def mount_index_routes(app: FastAPI): + project_dir = os.path.dirname(os.path.dirname(__file__)) + web_dir = os.path.join(project_dir, "website/dist") + web_config_file = os.path.join(web_dir, "config.js") + update_web_port(web_config_file) + if os.path.exists(web_dir): + app.mount("/web", StaticFiles(directory=web_dir), name="static") + else: + err_str = f"No website resources in {web_dir}, please complile the website by npm first" + logger.error(err_str) + print(err_str) + exit(1) + + +def run_api(app, host, port, **kwargs): + if kwargs.get("ssl_keyfile") and kwargs.get("ssl_certfile"): + uvicorn.run(app, + host=host, + port=port, + ssl_keyfile=kwargs.get("ssl_keyfile"), + ssl_certfile=kwargs.get("ssl_certfile"), + ) + else: + uvicorn.run(app, host=host, port=port, log_level='debug') + + +def custom_openapi(app): + if app.openapi_schema: + return app.openapi_schema + openapi_schema = get_openapi( + title="ktransformers server", + version="1.0.0", + summary="This is a server that provides a RESTful API for ktransformers.", + description="We provided chat completion and openai assistant interfaces.", + routes=app.routes, + ) + openapi_schema["info"]["x-logo"] = { + "url": "https://kvcache.ai/media/icon_1.png" + } + app.openapi_schema = openapi_schema + return app.openapi_schema + +def main(): + cfg = Config() + parser = argparse.ArgumentParser(prog='kvcache.ai', + description='Ktransformers') + parser.add_argument("--host", type=str, default="0.0.0.0") + parser.add_argument("--port", type=int, default=cfg.server_port) + parser.add_argument("--ssl_keyfile", type=str) + parser.add_argument("--ssl_certfile", type=str) + parser.add_argument("--web", type=bool, default=False) + parser.add_argument("--model_name", type=str, default=cfg.model_name) + parser.add_argument("--model_path", type=str, default=cfg.model_path) + parser.add_argument("--device", type=str, default=cfg.model_device) + parser.add_argument("--gguf_path", type=str, default=cfg.gguf_path) + parser.add_argument("--optimize_config_path", type=str, required=False) + parser.add_argument("--cpu_infer", type=int, default=cfg.cpu_infer) + parser.add_argument("--type", type=str, default=cfg.backend_type) + + # ๅˆๅง‹ๅŒ–ๆถˆๆฏ + args = parser.parse_args() + cfg.model_name = args.model_name + cfg.model_path = args.model_path + cfg.model_device = args.device + cfg.mount_web = args.web + cfg.server_ip = args.host + cfg.server_port = args.port + cfg.cpu_infer = args.cpu_infer + cfg.backend_type = args.type + + default_args.model_dir = args.model_path + default_args.device = args.device + default_args.gguf_path = args.gguf_path + default_args.optimize_config_path = args.optimize_config_path + + app = create_app() + custom_openapi(app) + create_interface(config=cfg, default_args=default_args) + run_api(app=app, + host=args.host, + port=args.port, + ssl_keyfile=args.ssl_keyfile, + ssl_certfile=args.ssl_certfile,) + + +if __name__ == "__main__": + main() diff --git a/ktransformers/server/models/__init__.py b/ktransformers/server/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ktransformers/server/models/assistants/__init__.py b/ktransformers/server/models/assistants/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ktransformers/server/models/assistants/assistants.py 
b/ktransformers/server/models/assistants/assistants.py new file mode 100644 index 0000000..58e4340 --- /dev/null +++ b/ktransformers/server/models/assistants/assistants.py @@ -0,0 +1,29 @@ +from sqlalchemy import JSON, Column, Float, Integer, String, Text +from sqlalchemy.orm import relationship + +from ktransformers.server.utils.sql_utils import Base + + +class Assistant(Base): + __tablename__ = "assistants" + + id = Column(String, primary_key=True, index=True) + object = Column(String, default="assistant") + created_at = Column(Integer) + + name = Column(String, nullable=True) + description = Column(String, nullable=True) + model = Column(String) + instructions = Column(Text, nullable=True) + tools = Column(JSON) + tool_resources = Column(JSON) + temperature = Column(Float, nullable=True) + meta_data = Column(JSON, nullable=True) + top_p = Column(Float, nullable=True) + response_format = Column(JSON, default="auto") + + build_status = Column(JSON, nullable=True) + + runs = relationship("Run", back_populates="assistant") + + messages = relationship("Message", back_populates="assistant") diff --git a/ktransformers/server/models/assistants/messages.py b/ktransformers/server/models/assistants/messages.py new file mode 100644 index 0000000..c0e1ccb --- /dev/null +++ b/ktransformers/server/models/assistants/messages.py @@ -0,0 +1,28 @@ +from sqlalchemy import JSON, Column, ForeignKey, Integer, String +from sqlalchemy.orm import relationship + +from ktransformers.server.utils.sql_utils import Base + + +class Message(Base): + __tablename__ = "messages" + + id = Column(String, primary_key=True, index=True) + object = Column(String, default="thread.message") + created_at = Column(Integer) + + thread_id = Column(String, ForeignKey("threads.id")) + status = Column(String, default="in_progress") + incomplete_details = Column(JSON, nullable=True) + completed_at = Column(Integer, nullable=True) + incomplete_at = Column(Integer, nullable=True) + role = Column(JSON) + content = Column(JSON) + assistant_id = Column(String, ForeignKey("assistants.id"), nullable=True) + run_id = Column(String, ForeignKey("runs.id"), nullable=True) + attachments = Column(JSON, nullable=True) + meta_data = Column(JSON, nullable=True) + + thread = relationship("Thread", back_populates="messages") + assistant = relationship("Assistant", back_populates="messages") + run = relationship("Run", back_populates="message") diff --git a/ktransformers/server/models/assistants/run_steps.py b/ktransformers/server/models/assistants/run_steps.py new file mode 100644 index 0000000..b241931 --- /dev/null +++ b/ktransformers/server/models/assistants/run_steps.py @@ -0,0 +1,31 @@ +from sqlalchemy import JSON, Column, ForeignKey, Integer, String +from sqlalchemy.orm import relationship + +from ktransformers.server.utils.sql_utils import Base + + +class RunStep(Base): + __tablename__ = "run_steps" + # todo + id = Column(String, primary_key=True, index=True) + object = Column(String, default="thread.run.step") + created_at = Column(Integer) + + assistant_id = Column(String, ForeignKey("assistants.id")) + thread_id = Column(String, ForeignKey("threads.id")) + run_id = Column(String, ForeignKey("runs.id")) + type = Column(String) + status = Column(String) + step_details = Column(JSON) + last_error = Column(JSON, nullable=True) + expires_at = Column(Integer, nullable=True) + cancelled_at = Column(Integer, nullable=True) + failed_at = Column(Integer, nullable=True) + completed_at = Column(Integer, nullable=True) + + meta_data = Column(JSON, 
nullable=True) + usage = Column(JSON, nullable=True) + + assistant = relationship("Assistant", back_populates="run_steps") + thread = relationship("Thread", back_populates="run_steps") + run = relationship("Run", back_populates="run_steps") diff --git a/ktransformers/server/models/assistants/runs.py b/ktransformers/server/models/assistants/runs.py new file mode 100644 index 0000000..c002739 --- /dev/null +++ b/ktransformers/server/models/assistants/runs.py @@ -0,0 +1,39 @@ +from sqlalchemy import JSON, Column, Float, ForeignKey, Integer, String, Text +from sqlalchemy.orm import relationship + +from ktransformers.server.utils.sql_utils import Base + + +class Run(Base): + __tablename__ = "runs" + + id = Column(String, primary_key=True, index=True) + object = Column(String, default="thread.run") + created_at = Column(Integer) + thread_id = Column(String, ForeignKey("threads.id")) + assistant_id = Column(String, ForeignKey("assistants.id")) + status = Column(String) + required_action = Column(JSON, nullable=True) + last_error = Column(JSON, nullable=True) + expires_at = Column(Integer, nullable=True) + started_at = Column(Integer, nullable=True) + cancelled_at = Column(Integer, nullable=True) + failed_at = Column(Integer, nullable=True) + completed_at = Column(Integer, nullable=True) + incomplete_details = Column(JSON, nullable=True) + # get from assistant + model = Column(String) + instructions = Column(Text, nullable=True) + tools = Column(JSON) + meta_data = Column(JSON, nullable=True) + usage = Column(JSON, nullable=True) + temperature = Column(Float, nullable=True) + top_p = Column(Float, nullable=True) + max_propmp_tokens = Column(Integer, nullable=True) + truncation_strategy = Column(JSON) + tool_choice = Column(JSON) + response_format = Column(JSON, default="auto") + + thread = relationship("Thread", back_populates="runs") + assistant = relationship("Assistant", back_populates="runs") + message = relationship("Message", back_populates="run") diff --git a/ktransformers/server/models/assistants/threads.py b/ktransformers/server/models/assistants/threads.py new file mode 100644 index 0000000..d5f161c --- /dev/null +++ b/ktransformers/server/models/assistants/threads.py @@ -0,0 +1,18 @@ +from sqlalchemy import JSON, Column, Integer, String +from sqlalchemy.orm import relationship + +from ktransformers.server.utils.sql_utils import Base + + +class Thread(Base): + __tablename__ = "threads" + + id = Column(String, primary_key=True, index=True) + object = Column(String, default="thread") + created_at = Column(Integer) + + tool_resources = Column(JSON, nullable=True) + meta_data = Column(JSON, nullable=True) + + runs = relationship("Run", back_populates="thread") + messages = relationship("Message", back_populates="thread") diff --git a/ktransformers/server/requirements.txt b/ktransformers/server/requirements.txt new file mode 100644 index 0000000..d324cf2 --- /dev/null +++ b/ktransformers/server/requirements.txt @@ -0,0 +1,13 @@ +torch >= 2.3.0,<=2.3.1 +transformers == 4.43.2 +fastapi >= 0.111.0 +langchain >= 0.2.0 +blessed >= 1.20.0 +accelerate >= 0.31.0 +sentencepiece >= 0.1.97 +setuptools +build +ninja +wheel +colorlog +fire \ No newline at end of file diff --git a/ktransformers/server/schemas/__init__.py b/ktransformers/server/schemas/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ktransformers/server/schemas/assistants/__init__.py b/ktransformers/server/schemas/assistants/__init__.py new file mode 100644 index 0000000..e69de29 diff --git 
a/ktransformers/server/schemas/assistants/assistants.py b/ktransformers/server/schemas/assistants/assistants.py new file mode 100644 index 0000000..24ea8d4 --- /dev/null +++ b/ktransformers/server/schemas/assistants/assistants.py @@ -0,0 +1,202 @@ +from enum import Enum +from time import time +from typing import AsyncIterable, Callable, Dict, List, Optional, Union +from asyncio import Lock, Queue + +from fastapi import logger +from pydantic import BaseModel, Field, PrivateAttr, field_validator, model_validator +import torch + +from ktransformers.server.config.config import Config +from ktransformers.server.models.assistants.assistants import Assistant +from ktransformers.server.models.assistants.threads import Thread +from ktransformers.server.schemas.assistants.messages import Role +from ktransformers.server.schemas.assistants.runs import RunObject,RunStreamResponse,ObjectWithCreatedTime +from ktransformers.server.schemas.assistants.threads import ThreadObject +from ktransformers.server.schemas.base import Metadata,MetadataField,ObjectID +from ktransformers.server.schemas.assistants.tool import Tool,CodeInterpreter,FileSearch,RelatedThreads,FuntionTool,ToolResource,CodeInterpreterResource,FileSearchResource,RelatedThreadsResource,ToolType +from ktransformers.server.utils.sql_utils import SQLUtil + + +class AssistantBase(BaseModel): + name: Optional[str] = Field(None,description='The name of the assistant.') + description: Optional[str] = Field(None,description='The description of the assistant.') + instructions: Optional[str] = Field(None,description='Instructions which is added in front of the input of LLM') + tools: List[Tool] = Field([], max_length=128) + + @field_validator('tools', mode='before') + def validate_tools(cls, value): + re = [] + if not isinstance(value, list): + raise ValueError('Invalid type for tools') + + for tool in value: + if 'type' not in tool: + raise ValueError('Invalid type for tools') + if tool['type'] == 'code_interpreter': + re.append(CodeInterpreter(**tool)) + elif tool['type'] == 'file_search': + re.append(FileSearch(**tool)) + elif tool['type'] == 'related_threads': + re.append(RelatedThreads(**tool)) + elif tool['type'] == 'function': + re.append(FuntionTool(**tool)) + else: + raise ValueError('Invalid type for tools') + return re + + tool_resources: List[ToolResource] = Field([], max_length=128) + + @field_validator('tool_resources', mode='before') + def validate_tool_resources(cls, value): + re = [] + if not isinstance(value, list): + raise ValueError('Invalid type for tool resources') + + for tool_re in value: + if 'file_ids' in tool_re: + re.append(CodeInterpreterResource(**tool_re)) + elif 'vector_stores' in tool_re: + re.append(FileSearchResource(**tool_re)) + elif 'thread_ids' in tool_re: + re.append(RelatedThreadsResource(**tool_re)) + else: + raise ValueError('Invalid type for tool resources') + return re + + meta_data: Metadata = MetadataField + + @model_validator(mode='before') + def convert_meta_data(cls, values): + if 'meta_data' in values: + values['metadata'] = values['meta_data'] + return values + temperature: Optional[float] = Field(ge=0.0, le=2.0, default=1) + top_p: Optional[float] = Field(ge=0.0, le=1.0, default=1) + response_format: Union[str, Dict[str, str]] = "auto" + + +class AssistantCreate(AssistantBase): + model: str + + +class AssistantBuildStatus(BaseModel): + class Status(Enum): + not_build = "not_build" + in_queue = "in_queue" + parsing = "parsing" + prefilling = "prefilling" + dumping = "dumping" + completed = "completed" + 
paused = "paused" + + _lock: Lock = PrivateAttr(default_factory=Lock) + _queue: Optional[Queue] = PrivateAttr(None) + + status: Status = Field(default=Status.not_build) + total_file_count: int = Field(default=0) + parsed_file_count: int = Field(default=0) + + prefilling_current: int = Field(default=0) + prefilling_total: int = Field(default=0) + + build_started_time: Optional[int] = Field(default=None) + build_completed_time: Optional[int] = Field(default=None) + + # in megabytes + assistant_usage: int = Field(default=0, description='') + assistant_total_usage: int = Field(default=0) + disk_free_space: int = Field(default=0) + disk_total_space: int = Field(default=0) + + def to_stream_reply(self) -> str: + return f"event: assistant.build.status\ndata: {self.model_dump_json()}\n\n" + + +class AssistantObject(AssistantBase, ObjectWithCreatedTime): + model: Optional[str] = Field( + default=Config().model_name) + related_threads_objects: Optional[List] = Field(None, exclude=True) + _encoded_instruction: Optional[torch.Tensor] = PrivateAttr(default=None) + build_status: AssistantBuildStatus = Field(default=AssistantBuildStatus()) + + def as_api_response(self): + return self.model_dump(exclude={'build_status'}) + + def get_related_threads_ids(self) -> List[ObjectID]: + re = [] + for tool, tool_re in zip(self.tools, self.tool_resources): + if tool.type == ToolType.RELATED_THREADS: + re += tool_re.thread_ids or [] + return re + + def get_related_threads_objects(self) -> List: + # raise NotImplementedError # should be replaced + sql_utils = SQLUtil() + if self.related_threads_objects is None: + with sql_utils.get_db() as db: + db_threads = db.query(Thread).all() + self.related_threads_objects = [tool for tool in [ThreadObject.model_validate( + tool.__dict__) for tool in db_threads] if tool.is_related_threads and tool.meta_data['assistant_id'] == self.id] + # logger.debug( + # f'Found {len(self.related_threads_objects)} related threads') + return self.related_threads_objects + + def append_related_threads(self, thread_ids: List[ObjectID]): + # logger.debug(f'{self.tools} {self.tool_resources}') + for tool, tool_re in zip(self.tools, self.tool_resources): + if tool.type == ToolType.RELATED_THREADS: + tool_re.thread_ids += thread_ids + return + + self.tools.append(RelatedThreads(type=ToolType.RELATED_THREADS)) + self.tool_resources.append( + RelatedThreadsResource(thread_ids=thread_ids)) + + async def update_build_status(self, events: AsyncIterable) -> AsyncIterable: + async for event in events: + # logger.debug(event) + if isinstance(event, RunStreamResponse): + if event.event == RunObject.Status.completed: + self.build_status.status = AssistantBuildStatus.Status.completed + self.build_status.build_completed_time = int(time()) + self.sync_db() + yield self.build_status.model_copy() + elif isinstance(event, dict): + # logger.debug('dict') + if 'stage' in event: + if event['stage'] == 'prefill': + self.build_status.status = AssistantBuildStatus.Status.prefilling + self.build_status.prefilling_current = event['curr_progress'] + self.build_status.prefilling_total = event['max_progress'] + if event['stage'] == 'parse': + self.build_status.status = AssistantBuildStatus.Status.parsing + self.build_status.parsed_file_count = event['curr_progress'] + self.build_status.total_file_count = event['max_progress'] + yield self.build_status.model_copy() + + def get_build_status(self) -> AssistantBuildStatus: + return self.build_status + + + def sync_db(self)->None: + # raise NotImplementedError # should be replaced 
+ sql_utils = SQLUtil() + db_assistant = Assistant( + **self.model_dump(mode='json'), + ) + with sql_utils.get_db() as db: + sql_utils.db_merge_commit(db, db_assistant) + + def get_encoded_instruction(self,encode_fn:Callable)->torch.Tensor: + if self._encoded_instruction is None: + logger.info(f'encoding assistant instruction: {self.instructions}') + self._encoded_instruction = encode_fn(self.instructions, Role.user) + return self._encoded_instruction + + +class AssistantModify(AssistantBase): + model: Optional[str] = None + + +# Non API Backend diff --git a/ktransformers/server/schemas/assistants/messages.py b/ktransformers/server/schemas/assistants/messages.py new file mode 100644 index 0000000..b65ca7c --- /dev/null +++ b/ktransformers/server/schemas/assistants/messages.py @@ -0,0 +1,213 @@ +from enum import Enum +from typing import ForwardRef, List, Optional, Union,Callable + +import torch +from pydantic import BaseModel, PrivateAttr, model_validator + +from ktransformers.server.exceptions import not_implemented +from ktransformers.server.config.log import logger +from ktransformers.server.models.assistants.messages import Message +from ktransformers.server.schemas.base import Metadata, MetadataField, ObjectWithCreatedTime +from ktransformers.server.schemas.assistants.tool import Field,CodeInterpreter,FileSearch +from ktransformers.server.utils.sql_utils import SQLUtil + + +class IncompleteDetails(BaseModel): + reason: str + + +class ContentType(Enum): + image_file = "image_file" + image_url = "image_url" + text = "text" + + +class ContentObject(BaseModel): + type: ContentType + + +class ImageFile(BaseModel): + file_id: str + detail: str + + +class ImageFileObject(ContentObject): + image_file: ImageFile + + +class ImageUrl(BaseModel): + url: str + detail: str + + +class ImageUrlObject(ContentObject): + image_url: ImageUrl + + +class Annotation(BaseModel): + todo: str + + +class Text(BaseModel): + value: str + annotations: List[Annotation] = Field(default=[]) + + +class TextObject(ContentObject): + text: Text + delta_index: int = Field(default=0,exclude=True) + special_tokens_on: bool = Field(default=False,exclude=True) + last_two: str= Field(default='',exclude=True) + + def filter_append(self,text:str): + self.text.value+=text + self.delta_index+=1 + return True + + + +Content = Union[ImageFileObject, ImageUrlObject, TextObject] + + +class Attachment(BaseModel): + file_id: Optional[str] = Field(default=None) + tools: Optional[List[Union[CodeInterpreter, FileSearch]]] = Field(default=None) + + +class Role(Enum): + user = "user" + assistant = "assistant" + + def is_user(self)->bool: + return self == Role.user + + +class MessageCore(BaseModel): + role: Role + content: List[Content] + attachments: Optional[List[Attachment]] + meta_data: Metadata = MetadataField + @model_validator(mode='before') + @classmethod + def convert_meta_data(cls,values): + if 'meta_data' in values: + values['metadata'] = values['meta_data'] + return values + + +class MessageBase(MessageCore): + class Status(Enum): + created = "created" # only used for stream + in_progress = "in_progress" + incomplete = "incomplete" + completed = "completed" + thread_id: str + status: Status + incomplete_details: Optional[IncompleteDetails] = None + completed_at: Optional[int] = None + incomplete_at: Optional[int] = None + + assistant_id: Optional[str] = None + run_id: Optional[str] + + +MessageStreamResponse = ForwardRef('MessageStreamResponse') + +class MessageObject(MessageBase, ObjectWithCreatedTime): + _encoded_content: 
Optional[torch.Tensor] = PrivateAttr(default=None) + + + def get_text_content(self) -> str: + text_content = "" + for content in self.content: + if content.type == ContentType.text: + text_content += content.text.value + else: + raise not_implemented("Content other than text") + return text_content + + async def get_encoded_content(self,encode_fn:Callable): + if self._encoded_content is None: + logger.info(f'encoding {self.role.value} message({self.status.value}): {self.get_text_content()}') + self._encoded_content = encode_fn(self.get_text_content(),self.role) + + for f in self.get_attached_files(): + logger.info(f'encoding file: {f.filename}') + self._encoded_content = torch.cat([self._encoded_content, encode_fn(await f.get_str(),self.role)],dim=-1) + yield None + + yield self._encoded_content + + + def get_attached_files(self): + raise NotImplementedError # should be replaced + + + + def append_message_delta(self,text:str): + raise NotImplementedError # should be replaced + + def sync_db(self): + # raise NotImplementedError # should be replaced + sql_utils = SQLUtil() + db_message = Message( + **self.model_dump(mode="json"), + ) + with sql_utils.get_db() as db: + sql_utils.db_merge_commit(db, db_message) + + + def stream_response_with_event(self, event: MessageBase.Status) -> MessageStreamResponse: + match event: + case MessageObject.Status.created: + self.status = MessageObject.Status.in_progress + case _: + self.status = event + return MessageStreamResponse(message=self, event=event) + + +class MessageStreamResponse(BaseModel): + message: MessageObject + event: MessageObject.Status + + def to_stream_reply(self): + return f"event: thread.message.{self.event.value}\ndata: {self.message.model_dump_json()}\n\n" + + +class MessageCreate(BaseModel): + role: Role = Field(default=Role.user) + content: Union[str | List[Content]] + attachments: Optional[List[Attachment]] = None + meta_data: Metadata = MetadataField + @model_validator(mode='before') + @classmethod + def convert_meta_data(cls,values): + if 'meta_data' in values: + values['metadata'] = values['meta_data'] + return values + + def to_core(self) -> MessageCore: + # logger.debug(f"Converting message create to core {self.model_dump()}") + core = MessageCore( + role=self.role, + content=[], + attachments=self.attachments, + meta_data=self.meta_data, + ) + if isinstance(self.content, str): + core.content = [TextObject(type="text", text=Text(value=self.content, annotations=[]))] + elif isinstance(self.content, list): + core.content = self.content + else: + raise ValueError("Invalid content type") + return core + + +class MessageModify(BaseModel): + meta_data: Metadata = MetadataField + @model_validator(mode='before') + @classmethod + def convert_meta_data(cls,values): + if 'meta_data' in values: + values['metadata'] = values['meta_data'] + return values diff --git a/ktransformers/server/schemas/assistants/runs.py b/ktransformers/server/schemas/assistants/runs.py new file mode 100644 index 0000000..a76dd74 --- /dev/null +++ b/ktransformers/server/schemas/assistants/runs.py @@ -0,0 +1,201 @@ +from enum import Enum +from typing import Dict, List, Optional, Union, ForwardRef + +from pydantic import BaseModel, Field, model_validator + +from ktransformers.server.models.assistants.runs import Run +from ktransformers.server.schemas.base import TODO, Metadata, MetadataField, ObjectWithCreatedTime +from ktransformers.server.schemas.assistants.threads import ThreadCreate +from ktransformers.server.schemas.assistants.tool import Tool, ToolResource 
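The convert_meta_data pre-validator that recurs throughout these schemas exists because the stored field is named meta_data (SQLAlchemy reserves the metadata attribute on declarative models), while the OpenAI-style API uses metadata; the Pydantic alias plus a before-validator lets either spelling populate the field. A minimal self-contained sketch of the pattern follows; the class name WithMetadata is illustrative, not from this commit:

from typing import Dict
from pydantic import BaseModel, Field, model_validator

Metadata = Dict[str, str]

class WithMetadata(BaseModel):
    meta_data: Metadata = Field(default={}, alias="metadata")

    @model_validator(mode="before")
    @classmethod
    def convert_meta_data(cls, values):
        # Copy the Python-side key onto the alias key before validation,
        # so constructing with either name works.
        if isinstance(values, dict) and "meta_data" in values:
            values["metadata"] = values["meta_data"]
        return values

print(WithMetadata(meta_data={"k": "v"}).meta_data)  # {'k': 'v'}
print(WithMetadata(metadata={"k": "v"}).meta_data)   # {'k': 'v'}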
+from ktransformers.server.utils.sql_utils import SQLUtil + + +class ToolCall(BaseModel): + id: str + type: str + function: TODO + + +class SubmitToolOutputs(BaseModel): + tool_calls: List[ToolCall] + + +class RequiredAction(BaseModel): + type: str + submit_tool_outputs: TODO + + +class LastError(BaseModel): + code: str + message: str + + +class IncompleteDetails(BaseModel): + reason: str + + +class Usage(BaseModel): + completion_tokens: int + prompt_tokens: int + total_tokens: int + + +class TruncationStrategy(BaseModel): + type: str = "auto" + last_message: Optional[int] + + +class ToolChoiceType(Enum): + none = "none" + auto = "auto" + required = "required" + + +class RunBase(BaseModel): + class Status(Enum): + created = "created" # only stream event will have this created status + queued = "queued" + in_progress = "in_progress" + requires_action = "requires_action" + cancelling = "cancelling" + cancelled = "cancelled" + failed = "failed" + completed = "completed" + expired = "expired" + + + thread_id: str + assistant_id: str + status: Status = Status.queued + required_action: Optional[RequiredAction] = Field(None) + last_error: Optional[LastError] = Field(None) + expires_at: Optional[int]= Field(None) + started_at: Optional[int] = Field(None) + cancelled_at: Optional[int] = Field(None) + failed_at: Optional[int] = Field(None) + completed_at: Optional[int] = Field(None) + incomplete_details: Optional[IncompleteDetails] = Field(None) + model: Optional[str] = Field(None) + instructions: Optional[str] = Field(None) + tools: Optional[List[Tool]] = Field([]) + meta_data: Metadata = MetadataField + @model_validator(mode='before') + @classmethod + def convert_meta_data(cls,values): + if 'meta_data' in values: + values['metadata'] = values['meta_data'] + return values + + def set_compute_save(self,save:int): + self.meta_data['compute_save'] = str(save) + + + usage: Optional[Usage] = Field(None) + temperature: Optional[float] = Field(None) + top_p: Optional[float]= Field(None) + max_propmp_tokens: Optional[int]= Field(None) + truncation_strategy: Optional[TruncationStrategy]= Field(None) + tool_choice: Optional[Union[ToolChoiceType, dict]]= Field(None) + response_format: Union[str, Dict[str, str]] = "auto" + + +RunStreamResponse = ForwardRef('RunStreamResponse') + +class RunObject(RunBase, ObjectWithCreatedTime): + def stream_response_with_event(self,event:RunBase.Status)->RunStreamResponse: + match event: + case RunBase.Status.created: + self.status = RunBase.Status.queued + case _: + self.status = event + return RunStreamResponse(run=self, event=event) + + + def sync_db(self): + # raise NotImplementedError # should be replaced in crud + sql_utils = SQLUtil() + db_run = Run( + **self.model_dump(mode='json'), + ) + with sql_utils.get_db() as db: + sql_utils.db_merge_commit(db, db_run) + + def create_message_creation_step(self): + raise NotImplementedError # should be replaced + + +class RunStreamResponse(BaseModel): + run: RunObject + event: RunObject.Status + def to_stream_reply(self): + return f"event: thread.run.{self.event.value}\ndata: {self.run.model_dump_json()}\n\n" + +class RunCreate(BaseModel): + assistant_id: str + model: Optional[str] = Field(default=None) + instructions: Optional[str] = Field(default=None) + # TODO: Add this + # additional_instructions: Optional[str] + # additional_messages: Optional[List[MessageCore]] + tools: List[Tool] = Field(default=[]) + meta_data: Metadata = MetadataField + @model_validator(mode='before') + @classmethod + def convert_meta_data(cls,values): + 
if 'meta_data' in values: + values['metadata'] = values['meta_data'] + return values + temperature: Optional[float] = Field(default=None) + top_p: Optional[float] = Field(default=None) + stream: Optional[bool] = Field(default=None) + max_propmp_tokens: Optional[int] = Field(default=None) + # TODO: Add this + # max_completion_tokens: Optional[int] + truncation_strategy: Optional[TruncationStrategy] = Field(default=None) + tool_choice: Optional[Union[ToolChoiceType, dict]] = Field(default=None) + response_format: Union[str, Dict[str, str]] = Field(default="auto") + + +class RunThreadCreate(BaseModel): + assistant_id: str + thread: Optional[ThreadCreate] + model: Optional[str] + instructions: Optional[str] + tools: List[Tool] + tool_resources: List[ToolResource] + meta_data: Metadata = MetadataField + @model_validator(mode='before') + @classmethod + def convert_meta_data(cls,values): + if 'meta_data' in values: + values['metadata'] = values['meta_data'] + return values + temperature: Optional[float] + top_p: Optional[float] + stream: Optional[bool] + max_propmp_tokens: Optional[int] + # TODO: Add this + # max_completion_tokens: Optional[int] + truncation_strategy: TruncationStrategy + tool_choice: Union[ToolChoiceType, dict] + response_format: Union[str, Dict[str, str]] = "auto" + + +class RunModify(BaseModel): + meta_data: Metadata = MetadataField + @model_validator(mode='before') + @classmethod + def convert_meta_data(cls,values): + if 'meta_data' in values: + values['metadata'] = values['meta_data'] + return values + + +class ToolOutput(BaseModel): + tool_call_id: Optional[str] + output: Optional[str] + + +class RunSubmit(BaseModel): + tool_outputs: List[ToolOutput] + stream: Optional[bool] diff --git a/ktransformers/server/schemas/assistants/streaming.py b/ktransformers/server/schemas/assistants/streaming.py new file mode 100644 index 0000000..c5d8a04 --- /dev/null +++ b/ktransformers/server/schemas/assistants/streaming.py @@ -0,0 +1,169 @@ +import asyncio +from typing import AsyncIterable, List, Union + +from fastapi import Request +from fastapi.responses import StreamingResponse +from pydantic import BaseModel + +from ktransformers.server.schemas.assistants.runs import RunStreamResponse +from ktransformers.server.schemas.endpoints.chat import ChatCompletionChunk +from ktransformers.server.config.log import logger +from ktransformers.server.schemas.base import Object +from ktransformers.server.schemas.assistants.messages import ContentType, ImageFileObject, ImageUrlObject, MessageObject, Text, TextObject + + +class TextObjectWithIndex(TextObject): + index: int + + +class ImageFileObjectWithIndex(ImageFileObject): + index: int + + +class ImageUrlObjectWithIndex(ImageUrlObject): + index: int + + +ContentWithIndex = Union[TextObjectWithIndex, + ImageFileObjectWithIndex, ImageUrlObjectWithIndex] + + +class MessageDeltaImpl(BaseModel): + # role: Optional[str] + content: List[ContentWithIndex] + + +class MessageDelta(Object): + delta: MessageDeltaImpl + + def to_stream_reply(self): + return f"event: thread.message.delta\ndata: {self.model_dump_json()}\n\n" + + +def text_delta(index: int, text: str): + return MessageDeltaImpl(content=[TextObjectWithIndex(index=index, type=ContentType.text, text=Text(value=text))]) + + +def append_message_delta(self: MessageObject, text: str): + + if len(self.content) == 0: + self.content.append(TextObject(type=ContentType.text, + text=Text(value=''), delta_index=0)) + + text_object: TextObject = self.content[0] + if text_object.filter_append(text): + return 
MessageDelta(id=self.id, object="thread.message.delta", delta=text_delta(text_object.delta_index, text)) + else: + return None + + +MessageObject.append_message_delta = append_message_delta + + +class RunStepDeltaImpl(BaseModel): + pass + + +class RunStepDelta(Object): + delta: RunStepDeltaImpl + + def to_stream_reply(self): + return f"event: thread.run.step.delta\ndata: {self.model_dump_json()}\n\n" + + +class Done(): + def to_stream_reply(self): + return f"event: done\ndata: [DONE]\n\n" + + +async def check_client_link(request: Request, async_events: AsyncIterable): + async for event in async_events: + if await request.is_disconnected(): + break + yield event + + +async def add_done(async_events: AsyncIterable): + async for event in async_events: + yield event + yield Done() + + +async def to_stream_reply(async_events: AsyncIterable): + async for event in async_events: + if isinstance(event, str): + yield event + else: + yield event.to_stream_reply() + + +async def filter_api_event(async_events: AsyncIterable): + async for event in async_events: + if isinstance(event, MessageDelta) or isinstance(event, RunStepDelta) or isinstance(event, RunStreamResponse) or isinstance(event, Done): + yield event + + +async def filter_chat_chunk(async_events: AsyncIterable): + async for event in async_events: + if isinstance(event, ChatCompletionChunk): + yield event + + +async def filter_by_types(async_events: AsyncIterable, types: List): + async for event in async_events: + for type in types: + if isinstance(event, type): + yield event + continue + + +def api_stream_response(request: Request, async_events: AsyncIterable): + return StreamingResponse(check_client_link(request, to_stream_reply(add_done(filter_api_event(async_events)))), media_type="text/event-stream") + + +def chat_stream_response(request: Request, async_events: AsyncIterable): + return StreamingResponse(check_client_link(request, to_stream_reply(add_done(filter_chat_chunk(async_events)))), media_type="text/event-stream") + + +def stream_response(request: Request, async_events: AsyncIterable): + return StreamingResponse(check_client_link(request, to_stream_reply(add_done(async_events))), media_type="text/event-stream") + + +def check_link_response(request: Request, async_events: AsyncIterable): + return StreamingResponse(check_client_link(request, async_events), media_type="text/event-stream") + + +def wrap_async_generator_into_queue(async_events: AsyncIterable) -> asyncio.Queue: + queue = asyncio.Queue() + + async def inner(): + # logger.debug('run inner') + async for event in async_events: + # logger.debug(f'put: {event}') + await queue.put(event) + await asyncio.sleep(0) + # logger.debug(f'put: None') + await queue.put(None) + asyncio.create_task(inner()) + return queue + + +async def unwrap_async_queue(queue: asyncio.Queue) -> AsyncIterable: + while True: + events = [await queue.get()] + events.extend([queue.get_nowait() for _ in range(queue.qsize())]) + + logger.debug(f'getting {len(events)} events') + for event in events: + if event is None: + break + yield event + + +async def unwrap_async_queue_slow(queue: asyncio.Queue) -> AsyncIterable: + while True: + event = await queue.get() + # logger.debug(f'unwrap_async_queue {event}') + if event is None: + break + yield event diff --git a/ktransformers/server/schemas/assistants/threads.py b/ktransformers/server/schemas/assistants/threads.py new file mode 100644 index 0000000..cf1c382 --- /dev/null +++ b/ktransformers/server/schemas/assistants/threads.py @@ -0,0 +1,49 @@ +from enum import 
Enum +from typing import List +from typing_extensions import Self + +from pydantic import BaseModel, Field, model_validator + +from ktransformers.server.schemas.base import Metadata, MetadataField, ObjectWithCreatedTime +from ktransformers.server.schemas.assistants.tool import ToolResource +from ktransformers.server.schemas.assistants.messages import MessageCore + + +class ThreadBase(BaseModel): + meta_data: Metadata = MetadataField + @model_validator(mode='before') + @classmethod + def convert_meta_data(cls,values): + if 'meta_data' in values: + values['metadata'] = values['meta_data'] + return values + + tool_resources: List[ToolResource] = Field([], max_length=128) + + +class ThreadObject(ThreadBase, ObjectWithCreatedTime): + is_related_threads:bool = Field(False,exclude=True) + + @model_validator(mode='after') + def check_is_related_threads(self)->Self: + # logger.debug(f'check thread {self.id} is related thread? by {self}') + if 'assistant_id' in self.meta_data: + self.is_related_threads = True + return self + + class StreamEvent(Enum): + created = 'created' + + def to_stream_reply(self,event:StreamEvent): + return f"event: thread.{event.value}\ndata: {self.model_dump_json()}\n\n" + + +class ThreadCreate(ThreadBase): + messages: List[MessageCore] = Field(default=[]) + + +class ThreadModify(ThreadBase): + pass + + +# other than OpenAI API diff --git a/ktransformers/server/schemas/assistants/tool.py b/ktransformers/server/schemas/assistants/tool.py new file mode 100644 index 0000000..27fdcae --- /dev/null +++ b/ktransformers/server/schemas/assistants/tool.py @@ -0,0 +1,54 @@ +from enum import Enum +from typing import List, Optional, Union + +from pydantic import BaseModel, Field + +from ktransformers.server.schemas.base import ObjectID + + +class ToolType(str, Enum): + CODE_INTERPRETER = "code_interpreter" + FILE_SEARCH = "file_search" + RELATED_THREADS = "related_threads" + FUNCTION = "function" + + +class ToolBase(BaseModel): + type: ToolType + + +class CodeInterpreter(ToolBase): + pass + + +class FileSearch(ToolBase): + pass + + +class RelatedThreads(ToolBase): + pass + + +class FuntionTool(ToolBase): + description: str + name: str + parameters: List[str] + + +Tool = Union[CodeInterpreter, FileSearch, RelatedThreads, FuntionTool] + + +class CodeInterpreterResource(BaseModel): + file_ids: Optional[List[str]] = Field(default_factory=list, max_length=20) + + +class FileSearchResource(BaseModel): + vector_store_ids: Optional[List[str]] = Field(default_factory=list, max_length=1) + vector_stores: Optional[List[str]] = Field(default_factory=list, max_length=1) + + +class RelatedThreadsResource(BaseModel): + thread_ids: List[ObjectID] = Field(default=[]) + + +ToolResource = Union[CodeInterpreterResource,FileSearchResource,RelatedThreadsResource] diff --git a/ktransformers/server/schemas/base.py b/ktransformers/server/schemas/base.py new file mode 100644 index 0000000..35058dc --- /dev/null +++ b/ktransformers/server/schemas/base.py @@ -0,0 +1,46 @@ +from enum import Enum +from typing import Dict + +import sqlalchemy +from pydantic import BaseModel, ConfigDict, Field + +TODO = BaseModel + +ObjectID = str + + +class Object(BaseModel): + id: ObjectID + object: str + + model_config = ConfigDict(from_attributes=True) + + +# Pydantic Base Models +class ObjectWithCreatedTime(Object): + created_at: int + + + +class Order(str, Enum): + ASC = "asc" + DESC = "desc" + + def to_sqlalchemy_order(self): + match self: + case Order.ASC: + return sqlalchemy.asc + case Order.DESC: + return sqlalchemy.desc + + 
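For context, to_sqlalchemy_order returns the sqlalchemy.asc/sqlalchemy.desc function itself, which the CRUD layer then applies to a column, as in order_by(order.to_sqlalchemy_order()(Thread.created_at)). A runnable sketch using the Order enum defined above against a throwaway in-memory table (the Item model is hypothetical):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

DemoBase = declarative_base()

class Item(DemoBase):
    # Hypothetical table standing in for Thread above.
    __tablename__ = "items"
    id = Column(String, primary_key=True)
    created_at = Column(Integer)

engine = create_engine("sqlite://")
DemoBase.metadata.create_all(engine)
with sessionmaker(bind=engine)() as db:
    # Order.DESC.to_sqlalchemy_order() is sqlalchemy.desc, a callable that
    # wraps the column into an ORDER BY expression.
    query = db.query(Item).order_by(Order.DESC.to_sqlalchemy_order()(Item.created_at))
    print(query)  # SELECT ... FROM items ORDER BY items.created_at DESC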
+Metadata = Dict[str, str] +MetadataField: Metadata = Field({},max_length=16, alias="metadata") + + +class DeleteResponse(Object): + deleted: bool = True + +class OperationResponse(BaseModel): + operation: str + status: str diff --git a/ktransformers/server/schemas/conversation.py b/ktransformers/server/schemas/conversation.py new file mode 100644 index 0000000..c64ea92 --- /dev/null +++ b/ktransformers/server/schemas/conversation.py @@ -0,0 +1,12 @@ +from typing import Optional + +from pydantic import BaseModel + +from .assistants.assistants import AssistantObject +from .assistants.threads import ThreadObject +from .assistants.messages import MessageObject + +class ThreadPreview(BaseModel): + assistant: Optional[AssistantObject] = None + thread: ThreadObject + first_message: Optional[MessageObject] = None diff --git a/ktransformers/server/schemas/endpoints/chat.py b/ktransformers/server/schemas/endpoints/chat.py new file mode 100644 index 0000000..5c4dc4e --- /dev/null +++ b/ktransformers/server/schemas/endpoints/chat.py @@ -0,0 +1,78 @@ +from typing import List, Optional +from enum import Enum + +from pydantic import BaseModel + +from ktransformers.server.schemas.base import Object + +class Role(Enum): + system = 'system' + user = 'user' + assistant = 'assistant' + tool = 'tool' + function = 'function' + + +class Message(BaseModel): + content: str + role:Role + name: Optional[str] = None + def to_tokenizer_message(self): + return {'content':self.content,'role':self.role.value} + + +class ChatCompletionCreate(BaseModel): + messages: List[Message] + model : str + stream : bool = False + + def get_tokenizer_messages(self): + return [m.to_tokenizer_message() for m in self.messages] + +class FinishReason(Enum): + stop = 'stop' + length = 'length' + +class Choice(BaseModel): + index: int + message: Message + logprobs: Optional[str] = None + finish_reason: FinishReason = None + +class DeltaChoice(BaseModel): + index: int + delta: Message + logprobs: Optional[str] = None + finish_reason: FinishReason = None + + +class Usage(BaseModel): + completion_tokens:int + prompt_tokens:int + total_tokens:int + + +class ChatCompletionBase(Object): + created:int + model:str = 'not implmented' + system_fingerprint:str = 'not implmented' + usage: Optional[Usage] = None + +class ChatCompletionObject(ChatCompletionBase): + choices:List[Choice] = [] + + def append_token(self,token:str): + if len(self.choices) == 0: + self.choices.append(Choice(index=0,message=Message(content='',role=Role.assistant))) + self.choices[0].message.content += token + +class ChatCompletionChunk(ChatCompletionBase): + choices:List[DeltaChoice] = [] + + def set_token(self,token:str): + self.choices = [ + DeltaChoice(index=0,delta=Message(content=token,role=Role.assistant)) + ] + + def to_stream_reply(self): + return f"data:{self.model_dump_json()}\n\n" diff --git a/ktransformers/server/schemas/legacy/__init__.py b/ktransformers/server/schemas/legacy/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ktransformers/server/schemas/legacy/completions.py b/ktransformers/server/schemas/legacy/completions.py new file mode 100644 index 0000000..874e556 --- /dev/null +++ b/ktransformers/server/schemas/legacy/completions.py @@ -0,0 +1,48 @@ +from typing import List, Optional +from enum import Enum + +from pydantic import BaseModel + +from ..base import Object + +class CompletionCreate(BaseModel): + model: str + prompt: str | List[str] + stream: bool = False + + def get_tokenizer_messages(self): + if isinstance(self.prompt,List): + 
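            # NOTE: as committed, the line below passes an argument to a
            # method that accepts none and discards the result; the likely
            # intent is self.prompt = '\n'.join(self.prompt), collapsing a
            # list prompt into a single string before falling through.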
self.get_tokenizer_messages('\n'.join(self.prompt)) + return [{'content':self.prompt,'role':'user'}] + + +class FinishReason(Enum): + stop = 'stop' + length = 'length' + +class Choice(BaseModel): + index: int + text: str + logprobs: Optional[str] = None + finish_reason: FinishReason = None + + +class CompletionObject(Object): + created:int + choices: List[Choice] = [] + model:str = 'not implmented' + system_fingerprint:str = 'not implmented' + usage: Optional[str] = None + + def set_token(self,token:str): + if len(self.choices)==0: + self.choices.append(Choice(index=0,text='')) + self.choices[0].text = token + + def append_token(self,token:str): + if len(self.choices)==0: + self.choices.append(Choice(index=0,text='')) + self.choices[0].text += token + + def to_stream_reply(self): + return f"data:{self.model_dump_json()}\n\n" diff --git a/ktransformers/server/utils/__init__.py b/ktransformers/server/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ktransformers/server/utils/create_interface.py b/ktransformers/server/utils/create_interface.py new file mode 100644 index 0000000..af0a331 --- /dev/null +++ b/ktransformers/server/utils/create_interface.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python +# coding=utf-8 +''' +Description : +Author : qiyuxinlin +Date : 2024-07-25 11:50:16 +Version : 1.0.0 +LastEditors : qiyuxinlin +LastEditTime : 2024-07-25 12:54:48 +Copyright (c) 2024 by KVCache.AI, All Rights Reserved. +''' +from ktransformers.server.config.config import Config +from ktransformers.server.backend.args import ConfigArgs +from ktransformers.server.backend.context_manager import ThreadContextManager +from ktransformers.server.backend.interfaces.exllamav2 import ExllamaInterface +from ktransformers.server.backend.interfaces.transformers import TransformersInterface +from ktransformers.server.backend.interfaces.ktransformers import KTransformersInterface +def create_interface(config: Config, default_args: ConfigArgs): + if config.backend_type=='transformers': + from ktransformers.server.backend.interfaces.transformers import TransformersInterface as BackendInterface + elif config.backend_type == 'exllamav2': + from ktransformers.server.backend.interfaces.exllamav2 import ExllamaInterface as BackendInterface + elif config.backend_type == 'ktransformers': + from ktransformers.server.backend.interfaces.ktransformers import KTransformersInterface as BackendInterface + else: + raise NotImplementedError(f'{config.backend_type} not implemented') + GlobalInterface.interface = BackendInterface(default_args) + GlobalContextManager.context_manager = ThreadContextManager(GlobalInterface.interface) + +class GlobalContextManager: + context_manager: ThreadContextManager +class GlobalInterface: + interface: TransformersInterface | KTransformersInterface | ExllamaInterface + +def get_thread_context_manager() -> ThreadContextManager: + return GlobalContextManager.context_manager +def get_interface() -> TransformersInterface | KTransformersInterface | ExllamaInterface: + return GlobalInterface.interface \ No newline at end of file diff --git a/ktransformers/server/utils/multi_timer.py b/ktransformers/server/utils/multi_timer.py new file mode 100644 index 0000000..dbb54ca --- /dev/null +++ b/ktransformers/server/utils/multi_timer.py @@ -0,0 +1,79 @@ +import time + + +def format_time(seconds): + units = [ + ("hours", 3600), + ("minutes", 60), + ("seconds", 1), + ("milliseconds", 1e-3), + ("microseconds", 1e-6), + ] + + for unit_name, unit_value in units: + if seconds >= unit_value: + 
time_value = seconds / unit_value + return f"{time_value:.2f} {unit_name}" + return "0 seconds" # Handle case for 0 seconds + + +class Profiler: + def __init__(self): + self.timers = {} + self.counters = {} + + def create_timer(self, name): + self.timers[name] = { + "start_time": None, + "elapsed_time": 0, + "running": False, + } + + def start_timer(self, name): + if name not in self.timers: + raise ValueError(f"Timer '{name}' does not exist.") + if self.timers[name]["running"]: + raise ValueError(f"Timer '{name}' is already running.") + self.timers[name]["start_time"] = time.time() + self.timers[name]["running"] = True + + def pause_timer(self, name): + if name not in self.timers: + raise ValueError(f"Timer '{name}' does not exist.") + if not self.timers[name]["running"]: + raise ValueError(f"Timer '{name}' is not running.") + self.timers[name]["elapsed_time"] += time.time() - self.timers[name]["start_time"] + self.timers[name]["running"] = False + + def get_timer_sec(self, name): + if name not in self.timers: + raise ValueError(f"Timer '{name}' does not exist.") + if self.timers[name]["running"]: + current_time = self.timers[name]["elapsed_time"] + (time.time() - self.timers[name]["start_time"]) + else: + current_time = self.timers[name]["elapsed_time"] + return current_time + + def get_all_timers(self): + all_timers = {} + for name in self.timers: + all_timers[name] = self.get_timer_sec(name) + return all_timers + + def report_timer_string(self, name): + return f"{name} elapsed time: {format_time(self.get_timer_sec(name))}" + + def create_and_start_timer(self, name): + self.create_timer(name) + self.start_timer(name) + + + # Counter + def inc(self,key:str,delta:int=1): + self.counters[key] = self.counters.get(key,0) + delta + + def set_counter(self,key:str,to=0): + self.counters[key] = to + + def get_counter(self,key:str): + return self.counters.get(key,0) diff --git a/ktransformers/server/utils/sql_utils.py b/ktransformers/server/utils/sql_utils.py new file mode 100644 index 0000000..963cc23 --- /dev/null +++ b/ktransformers/server/utils/sql_utils.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python +# coding=utf-8 +''' +Description : +Author : chenxl +Date : 2024-06-12 09:12:58 +Version : 1.0.0 +LastEditors : chenxl +LastEditTime : 2024-07-27 01:56:04 +''' + +from urllib.parse import urlparse +import os +from contextlib import contextmanager +from sqlalchemy import create_engine +from sqlalchemy.orm import Session, sessionmaker, declarative_base + +from ktransformers.server.config.config import Config +from ktransformers.server.config.singleton import Singleton +from ktransformers.server.config.log import logger +from ktransformers.server.exceptions import db_exception + + +Base = declarative_base() + + +class SQLUtil(metaclass=Singleton): + """ + database connections init and management + """ + sqlalchemy_engine = None + session_local = None + + def __init__(self) -> None: + self.cfg: Config = Config() + if not self.sqlalchemy_engine: + SQLUtil.init_engine(self.cfg) + + @contextmanager + def get_db(self): + """ + After you finish using the session, it's crucial to close it. 
+ """ + if not SQLUtil.sqlalchemy_engine: + SQLUtil.init_engine(self.cfg) + session = self.session_local() # type: ignore pylint: disable=not-callable + try: + yield session + finally: + session.close() + + @staticmethod + def init_engine(cfg: Config): + """ + initial engine and session maker Factory + """ + pool_size = cfg.db_pool_size + if SQLUtil.sqlalchemy_engine is None: + if cfg.db_type == "sqllite": + db_url = SQLUtil.create_sqllite_url(cfg) + else: + logger.error("Unsupported database type %s", cfg.db_type) + exit(-1) + SQLUtil.sqlalchemy_engine = create_engine( + db_url, connect_args={"check_same_thread": False}, pool_size=pool_size) + SQLUtil.session_local = sessionmaker( + autocommit=False, autoflush=False, bind=SQLUtil.sqlalchemy_engine) + + @staticmethod + def create_sqllite_url(cfg): + """ + create and validate SQLLite url + """ + path: str = cfg.db_host + database: str = cfg.db_database + absolute_path: str = os.path.join(path, database) + url = 'sqlite:///' + absolute_path + try: + result = urlparse(url) + if all([result.scheme, result.path, result.scheme == 'sqlite']): + return url + else: + logger.error("invalid sqllite url: %s", url) + exit(-1) + except ValueError: + logger.error("invalid sqllite url: %s", url) + exit(-1) + + def db_add_commit_refresh(self, session: Session, what): + """ + add data to database + """ + try: + session.add(what) + session.commit() + session.refresh(what) + except Exception as e: + logger.exception("db commit error with data %s", str(what.__dict__)) + ex = db_exception() + ex.detail = str(e) + session.rollback() + raise ex from e + + def db_merge_commit(self, session: Session, what): + try: + session.merge(what) + session.commit() + except Exception as e: + ex = db_exception() + ex.detail = str(e) + logger.exception("db merge commit error with data %s", str(what.__dict__)) + session.rollback() + raise ex from e + + def db_update_commit_refresh(self, session: Session, existing, what): + what = what.model_dump(mode="json") + try: + for key in what.keys(): + if what[key] is not None: # ๆฃ€ๆŸฅbไธญ็š„ๅญ—ๆฎตๆ˜ฏๅฆไธบNone + setattr(existing, key, what[key]) # ๆ›ดๆ–ฐa็š„ๅญ—ๆฎต + session.commit() + session.refresh(existing) + except Exception as e: + ex = db_exception() + ex.detail = str(e) + logger.exception("db update commit refresh error with data %s", str(what.__dict__)) + session.rollback() + raise ex from e diff --git a/ktransformers/tests/dequant_gpu.py b/ktransformers/tests/dequant_gpu.py new file mode 100644 index 0000000..4fbca1c --- /dev/null +++ b/ktransformers/tests/dequant_gpu.py @@ -0,0 +1,57 @@ +import os +os.environ["CUDA_VISIBLE_DEVICES"]="1" +# add path +import sys +current_path = os.path.abspath(os.path.dirname(__file__)) +sys.path.append(current_path+"/../..") +import pycuda.autoinit +import pycuda.driver as cuda +from pycuda.compiler import SourceModule +import numpy as np +# from ktransformers.operators.linear import KTransformerLinear, QuantizedLinearMarlin +# from ktransformers.operators.experts import KTransformersMLPExpert, MLPExpertsTorch +from ktransformers.util.custom_gguf import GGUFLoader +import torch +import KTransformersOps +torch.set_default_dtype(torch.bfloat16) +import time +from transformers import ( + AutoConfig, +) + +gguf_config = GGUFLoader("/data/Qwen2-57B-A14B-Instruct-GGUF/q4_k_m") +model_name = "/data/Qwen2-57B-A14B-Instruct" +key = "blk.0." 
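The checks below repeat one pattern per tensor; a compact helper capturing that pattern (hypothetical, not part of this commit) could read:

def compare_dequant(loader: GGUFLoader, name: str, atol: float = 1e-6) -> None:
    # Dequantize the same GGUF tensor on CPU and on GPU, time both paths,
    # and verify the results agree within tolerance.
    t0 = time.time()
    cpu = loader.load_gguf_tensor(name, "cpu")
    t1 = time.time()
    gpu = loader.load_gguf_tensor(name, "cuda")
    t2 = time.time()
    ok = torch.allclose(cpu.to(torch.float32), gpu.cpu().to(torch.float32), atol=atol)
    print(f"{name}: cpu {t1 - t0:.2f}s, cuda {t2 - t1:.2f}s, allclose={ok}")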
+target = "ffn_down_exps.weight" + +t1 = time.time() +q_weight_cpu = gguf_config.load_gguf_tensor(key+target, "cpu") +# q_weight_cpu = torch.from_numpy(q_weight_cpu) + +t2 = time.time() +q_weight_gpu = gguf_config.load_gguf_tensor(key+target, "cuda") +t3 = time.time() +print() +allclose = torch.allclose(q_weight_cpu, q_weight_gpu.cpu().to(torch.float32), atol=1e-6) +print(f"Q6k {key+target}") +print("load gguf tensor from cpu cost: ", t2-t1) +print("load gguf tensor from gpu cost: ", t3-t2) +print("allclose: ", allclose) + + +key = "blk.1." +target = "ffn_up_shexp.weight" + +t1 = time.time() +q_weight_cpu = gguf_config.load_gguf_tensor(key+target, "cpu") +# q_weight_cpu = torch.from_numpy(q_weight_cpu) + +t2 = time.time() +q_weight_gpu = gguf_config.load_gguf_tensor(key+target, "cuda") +t3 = time.time() +print() +allclose = torch.allclose(q_weight_cpu, q_weight_gpu.cpu(), atol=1e-6) +print(f"Q4k {key+target}") +print("load gguf tensor from cpu cost: ", t2-t1) +print("load gguf tensor from gpu cost: ", t3-t2) +print("allclose: ", allclose) diff --git a/ktransformers/tests/dequant_gpu_t.py b/ktransformers/tests/dequant_gpu_t.py new file mode 100644 index 0000000..3efcdf3 --- /dev/null +++ b/ktransformers/tests/dequant_gpu_t.py @@ -0,0 +1,40 @@ +import os +os.environ["CUDA_VISIBLE_DEVICES"]="1" +# add path +import sys +sys.path.append("../..") +import pycuda.autoinit +import pycuda.driver as cuda +from pycuda.compiler import SourceModule +import numpy as np +from ktransformers.operators.linear import KTransformerLinear, QuantizedLinearMarlin +from ktransformers.operators.experts import KTransformersMLPExpert, MLPExpertsTorch +from ktransformers.util.custom_gguf import GGUFLoader, dequantize_q4_k_gpu, dequantize_q4_k +import torch +import CudaOps +torch.set_default_dtype(torch.bfloat16) +import time +from transformers import ( + AutoConfig, +) + +gguf_config = GGUFLoader("/data/Qwen2-57B-A14B-Instruct-GGUF/q4_k_m") +model_name = "/data/Qwen2-57B-A14B-Instruct" +key = "blk.0." +target = "ffn_up_exps.weight" + +data = gguf_config.get_mmap_tensor(key + target) + +_, factors, offsets, qs1, qs2= dequantize_q4_k(data) +factors_cpu = torch.from_numpy(factors) +offsets_cpu = torch.from_numpy(offsets) +qs1_cpu = torch.from_numpy(qs1) +qs2_cpu = torch.from_numpy(qs2) + + +_, factors, offsets, qs1, qs2 = dequantize_q4_k_gpu(data) + +print(torch.allclose(factors.cpu(), factors_cpu)) +print(torch.allclose(offsets.cpu(), offsets_cpu)) +print(torch.allclose(qs1.cpu(), qs1_cpu)) +print(torch.allclose(qs2.cpu(), qs2_cpu)) \ No newline at end of file diff --git a/ktransformers/util/cuda_graph_runner.py b/ktransformers/util/cuda_graph_runner.py new file mode 100644 index 0000000..2ac7a17 --- /dev/null +++ b/ktransformers/util/cuda_graph_runner.py @@ -0,0 +1,73 @@ +''' +Description : +Author : Boxin Zhang +Version : 0.1.0 +Copyright (c) 2024 by KVCache.AI, All Rights Reserved. +''' +import torch +from typing import Dict + +class CUDAGraphRunner: + + def __init__(self): + self.graph = None + self.input_buffers: Dict[str, torch.Tensor] = {} + self.output_buffers: Dict[str, torch.Tensor] = {} + + def capture( + self, + model, + cur_token, + position_ids, + cache_position, + past_key_values, + **kwargs, + ) -> None: + assert self.graph is None + # Capture the graph. 
+ torch.cuda.synchronize() + self.graph = torch.cuda.CUDAGraph() + #self.graph.enable_debug_mode() + self.model = model + inputs_embeds = model.model.embed_tokens(cur_token.to("cpu")).to("cuda") + with torch.cuda.graph(self.graph): + logits=model(inputs_embeds=inputs_embeds, + position_ids=position_ids, + cache_position=cache_position, + past_key_values=past_key_values, + **kwargs)[0] + past_key_values.change_seq_length(-1) + torch.cuda.synchronize() + #self.graph.debug_dump("cuda_graph_hooked.dot") + + # Save the input and output buffers. + self.input_buffers = { + "inputs_embeds": inputs_embeds, + "position_ids": position_ids, + "cache_position": cache_position, + } + self.output_buffers = {"logits": logits} + return + + def forward( + self, + cur_token, + position_ids, + cache_position, + ) -> torch.Tensor: + # Copy the input tensors to the input buffers. + inputs_embeds = self.model.model.embed_tokens(cur_token.to("cpu")) + self.input_buffers["inputs_embeds"].copy_(inputs_embeds) + self.input_buffers["position_ids"].copy_(position_ids) + self.input_buffers["cache_position"].copy_(cache_position) + + # Run the graph. + #print("begin replay") + #time.sleep(1) + self.graph.replay() + torch.cuda.synchronize() + # Return the output tensor. + return self.output_buffers["logits"] + + def __call__(self, *args, **kwargs): + return self.forward(*args, **kwargs) diff --git a/ktransformers/util/custom_gguf.py b/ktransformers/util/custom_gguf.py new file mode 100644 index 0000000..643713e --- /dev/null +++ b/ktransformers/util/custom_gguf.py @@ -0,0 +1,679 @@ +#!/usr/bin/env python +# coding=utf-8 +''' +Description : +Author : Azure-Tang, Boxin Zhang, chenht2022 +Date : 2024-07-26 08:48:54 +Version : 1.0.0 +LastEditors : Azure +LastEditTime : 2024-07-26 09:28:25 +Copyright (c) 2024 by KVCache.AI, All Rights Reserved. 
+'''
+# copied from llama.cpp/gguf-py/gguf/constants.py to avoid a dependency on the gguf package
+# GGUF specification
+# https://github.com/ggerganov/ggml/blob/master/docs/gguf.md
+import struct
+import warnings
+import numpy as np
+import numpy.typing as npt
+from typing import Sequence
+import os
+from enum import IntEnum
+import torch
+import KTransformersOps
+
+class GGMLQuantizationType(IntEnum):
+ F32 = 0
+ F16 = 1
+ Q4_0 = 2
+ Q4_1 = 3
+ Q5_0 = 6
+ Q5_1 = 7
+ Q8_0 = 8
+ Q8_1 = 9
+ Q2_K = 10
+ Q3_K = 11
+ Q4_K = 12
+ Q5_K = 13
+ Q6_K = 14
+ Q8_K = 15
+ IQ2_XXS = 16
+ IQ2_XS = 17
+ IQ3_XXS = 18
+ IQ1_S = 19
+ IQ4_NL = 20
+ IQ3_S = 21
+ IQ2_S = 22
+ IQ4_XS = 23
+ I8 = 24
+ I16 = 25
+ I32 = 26
+ I64 = 27
+ F64 = 28
+ IQ1_M = 29
+ BF16 = 30
+
+QK_K = 256
+GGML_QUANT_SIZES: dict[GGMLQuantizationType, tuple[int, int]] = {
+ GGMLQuantizationType.F32: (1, 4),
+ GGMLQuantizationType.F16: (1, 2),
+ GGMLQuantizationType.Q4_0: (32, 2 + 16),
+ GGMLQuantizationType.Q4_1: (32, 2 + 2 + 16),
+ GGMLQuantizationType.Q5_0: (32, 2 + 4 + 16),
+ GGMLQuantizationType.Q5_1: (32, 2 + 2 + 4 + 16),
+ GGMLQuantizationType.Q8_0: (32, 2 + 32),
+ GGMLQuantizationType.Q8_1: (32, 4 + 4 + 32),
+ GGMLQuantizationType.Q2_K: (256, 2 + 2 + QK_K // 16 + QK_K // 4),
+ GGMLQuantizationType.Q3_K: (256, 2 + QK_K // 4 + QK_K // 8 + 12),
+ GGMLQuantizationType.Q4_K: (256, 2 + 2 + QK_K // 2 + 12),
+ GGMLQuantizationType.Q5_K: (256, 2 + 2 + QK_K // 2 + QK_K // 8 + 12),
+ GGMLQuantizationType.Q6_K: (256, 2 + QK_K // 2 + QK_K // 4 + QK_K // 16),
+ GGMLQuantizationType.Q8_K: (256, 4 + QK_K + QK_K // 8),
+ GGMLQuantizationType.IQ2_XXS: (256, 2 + QK_K // 4),
+ GGMLQuantizationType.IQ2_XS: (256, 2 + QK_K // 4 + QK_K // 32),
+ GGMLQuantizationType.IQ3_XXS: (256, 2 + QK_K // 4 + QK_K // 8),
+ GGMLQuantizationType.IQ1_S: (256, 2 + QK_K // 8 + QK_K // 16),
+ GGMLQuantizationType.IQ4_NL: (32, 2 + 16),
+ GGMLQuantizationType.IQ3_S: (256, 2 + QK_K // 4 + QK_K // 8 + QK_K // 32 + 4),
+ GGMLQuantizationType.IQ2_S: (256, 2 + QK_K // 4 + QK_K // 16),
+ GGMLQuantizationType.IQ4_XS: (256, 2 + 2 + QK_K // 2 + QK_K // 64),
+ GGMLQuantizationType.I8: (1, 1),
+ GGMLQuantizationType.I16: (1, 2),
+ GGMLQuantizationType.I32: (1, 4),
+ GGMLQuantizationType.I64: (1, 8),
+ GGMLQuantizationType.F64: (1, 8),
+ GGMLQuantizationType.IQ1_M: (256, QK_K // 8 + QK_K // 16 + QK_K // 32),
+ GGMLQuantizationType.BF16: (1, 2),
+}
+
+# copied from llama.cpp/gguf-py/gguf/quants.py to avoid a dependency on the gguf package
+def quant_shape_to_byte_shape(shape: Sequence[int], quant_type: GGMLQuantizationType):
+ block_size, type_size = GGML_QUANT_SIZES[quant_type]
+ if shape[-1] % block_size != 0:
+ raise ValueError(f"Quantized tensor row size ({shape[-1]}) is not a multiple of {quant_type.name} block size ({block_size})")
+ return (*shape[:-1], shape[-1] // block_size * type_size)
+
+GGML_TYPES = {
+ "F32": 0,
+ "F16": 1,
+ "Q8_0": 8,
+ "Q2_K": 10,
+ "Q3_K": 11,
+ "Q4_K": 12,
+ "Q5_K": 13,
+ "Q6_K": 14,
+}
+
+GGML_NAMES = {ggml_type: name for name, ggml_type in GGML_TYPES.items()}
+
+GGML_BLOCK_SIZES = {
+ "F32": 4,
+ "F16": 2,
+ "Q8_0": 2 + 32,
+ "Q2_K": 256 // 16 + 256 // 4 + 2 + 2,
+ "Q3_K": 256 // 8 + 256 // 4 + 12 + 2,
+ "Q4_K": 2 + 2 + 12 + 256 // 2,
+ "Q5_K": 2 + 2 + 12 + 256 // 8 + 256 // 2,
+ "Q6_K": 256 // 2 + 256 // 4 + 256 // 16 + 2,
+}
+
+GGML_ELEMENTS_PER_BLOCK = {
+ "F32": 1,
+ "F16": 1,
+ "Q8_0": 32,
+ "Q2_K": 256,
+ "Q3_K": 256,
+ "Q4_K": 256,
+ "Q5_K": 256,
+ "Q6_K": 256,
+}
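+
+# For example, Q4_K packs a super-block of 256 weights into 2 + 2 + 12 + 128 = 144
+# bytes: two fp16 super-block scale/offset values, 12 bytes of packed 6-bit
+# sub-scales, and 128 bytes of 4-bit quants, i.e. roughly 4.5 bits per weight.
+# The same 144 is the block stride passed to KTransformersOps.dequantize_q4_k below.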
"uint64": 10, +# } +DATA_TYPES = { + "uint8": 0, + "int8": 1, + "uint16": 2, + "int16": 3, + "uint32": 4, + "int32": 5, + "float32": 6, + "bool": 7, + "string": 8, + "array": 9, + "uint64": 10, + "int64": 11, + "float64": 12, +} + +class GGUFLoader: + tensor_info: dict + gguf_path: str + tensor_file_map: dict # {tensor_name: tensor_file_path} + gguf_file_meta: dict + def __init__(self, gguf_path: str): + # Check dir exist + if not os.path.exists(gguf_path): + raise FileNotFoundError(f"GGUF dir not found: {gguf_path}") + + self.tensor_info = {} + self.gguf_path = gguf_path + self.tensor_file_map = {} + self.file_data_map = {} + self.gguf_file_meta = {} + + # Walk through all the .gguf files in the directory + for root, dirs, files in os.walk(gguf_path): + for file in files: + if file.endswith(".gguf"): + file_name = os.path.join(root, file) + with open(file_name, "rb") as f: + self.load_gguf(f) + if file_name not in self.file_data_map: + self.file_data_map[file_name] = np.memmap(file_name, mode = 'r') + + def load_gguf(self, f): + f.seek(0) + assert f.read(4) == b'GGUF' + values = struct.unpack("torch.Tensor: + t = self.tensor_info[name] + + shape = t["shape"] + ggml_type = t["ggml_type"] + + if ggml_type not in GGML_NAMES: + raise NotImplementedError(f"ggml_type {ggml_type} not implemented") + + ggml_name = GGML_NAMES[ggml_type] + + data = self.get_mmap_tensor(name) + + + if "cuda" in device.lower(): + values = GGML_DEQUANTIZE_GPU[ggml_name](data, device) + else: + values = GGML_DEQUANTIZE[ggml_name](data) + values = torch.from_numpy(values) + + return values.view(shape[::-1]) + +def read_value(f, data_type): + if data_type == DATA_TYPES["string"]: + length = struct.unpack("> 0, + qs[:, 16:32] >> 0, + qs[:, 00:16] >> 2, + qs[:, 16:32] >> 2, + qs[:, 00:16] >> 4, + qs[:, 16:32] >> 4, + qs[:, 00:16] >> 6, + qs[:, 16:32] >> 6, + qs[:, 32:48] >> 0, + qs[:, 48:64] >> 0, + qs[:, 32:48] >> 2, + qs[:, 48:64] >> 2, + qs[:, 32:48] >> 4, + qs[:, 48:64] >> 4, + qs[:, 32:48] >> 6, + qs[:, 48:64] >> 6, + ], axis=1) + + return d * (scales & 15) * (tmp & 3) - dmin * (scales >> 4) + +def dequantize_q2_k_gpu(data): + pass + +def dequantize_q3_k(data): + # C implementation + # https://github.com/ggerganov/ggml/blob/fca1caafea7de9fbd7efc733b9818f9cf2da3050/src/ggml-quants.c#L1723C32-L1723C42 + # C struct definition + # https://github.com/ggerganov/ggml/blob/fca1caafea7de9fbd7efc733b9818f9cf2da3050/src/ggml-quants.h#L95 + block_size = GGML_BLOCK_SIZES["Q3_K"] + num_blocks = len(data) // block_size + + data_f16 = np.frombuffer(data, dtype=np.float16).reshape(num_blocks, block_size // 2) + data_u8 = np.frombuffer(data, dtype=np.uint8).reshape(num_blocks, block_size) + + d = data_f16[:, -1].reshape(num_blocks, 1, 1).astype(np.float32) + bits = np.unpackbits(data_u8[:, :32].reshape(num_blocks, 32, 1), axis=-1, bitorder="little") + bits = 4 ^ (bits << 2) + qs = data_u8[:, 32:32 + 64].astype(np.int16) + a, b, c = data_u8[:, 96: 96 + 12].reshape(num_blocks, 3, 4).transpose(1, 0, 2) + scales = np.zeros((num_blocks, 4, 4), dtype=np.uint8) + scales[:, 0] = (a & 15) | ((c & 3) << 4) + scales[:, 1] = (b & 15) | (((c >> 2) & 3) << 4) + scales[:, 2] = (a >> 4) | (((c >> 4) & 3) << 4) + scales[:, 3] = (b >> 4) | ((c >> 6) << 4) + scales = scales.reshape(num_blocks, 16, 1).astype(np.int16) + + return d * (scales - 32) * np.stack([ + (((qs[:, 00:16] >> 0) & 3) - bits[:, :16, 0]), + (((qs[:, 16:32] >> 0) & 3) - bits[:, 16:, 0]), + (((qs[:, 00:16] >> 2) & 3) - bits[:, :16, 1]), + (((qs[:, 16:32] >> 2) & 3) - bits[:, 16:, 1]), + 
(((qs[:, 00:16] >> 4) & 3) - bits[:, :16, 2]), + (((qs[:, 16:32] >> 4) & 3) - bits[:, 16:, 2]), + (((qs[:, 00:16] >> 6) & 3) - bits[:, :16, 3]), + (((qs[:, 16:32] >> 6) & 3) - bits[:, 16:, 3]), + (((qs[:, 32:48] >> 0) & 3) - bits[:, :16, 4]), + (((qs[:, 48:64] >> 0) & 3) - bits[:, 16:, 4]), + (((qs[:, 32:48] >> 2) & 3) - bits[:, :16, 5]), + (((qs[:, 48:64] >> 2) & 3) - bits[:, 16:, 5]), + (((qs[:, 32:48] >> 4) & 3) - bits[:, :16, 6]), + (((qs[:, 48:64] >> 4) & 3) - bits[:, 16:, 6]), + (((qs[:, 32:48] >> 6) & 3) - bits[:, :16, 7]), + (((qs[:, 48:64] >> 6) & 3) - bits[:, 16:, 7]) + ], axis=1) + +def dequantize_q3_k_gpu(data): + pass + +def dequantize_q4_k(data): + # C implementation + # https://github.com/ggerganov/ggml/blob/fca1caafea7de9fbd7efc733b9818f9cf2da3050/src/ggml-quants.c#L1929 + # C struct definition + # https://github.com/ggerganov/ggml/blob/fca1caafea7de9fbd7efc733b9818f9cf2da3050/src/ggml-quants.h#L116 + block_size = GGML_BLOCK_SIZES["Q4_K"] + num_blocks = len(data) // block_size + + data_f16 = np.frombuffer(data, dtype=np.float16).reshape(num_blocks, block_size // 2) + data_u8 = np.frombuffer(data, dtype=np.uint8).reshape(num_blocks, block_size) + + # Casting to float32 because float16 is very slow on CPU + scale_factors = data_f16[:, 0].reshape(num_blocks, 1, 1).astype(np.float32) + scale_offsets = data_f16[:, 1].reshape(num_blocks, 1, 1).astype(np.float32) + qs1 = data_u8[:, 4:16].reshape(num_blocks, 12, 1) + qs2 = data_u8[:, 16:].reshape(num_blocks, 4, 32) + + # Dequantize scales and offsets (6 bits and 4 + 2 bits) + factors = scale_factors * np.concatenate([qs1[:, 0:4] & 0b111111, (qs1[:, 8:] & 15) | ((qs1[:, 0:4] >> 6) << 4)], axis=1) + offsets = scale_offsets * np.concatenate([qs1[:, 4:8] & 0b111111, (qs1[:, 8:] >> 4) | ((qs1[:, 4:8] >> 6) << 4)], axis=1) + + # Interleave low and high quantized bits + qs2 = np.stack([qs2 & 0xf, qs2 >> 4], axis=2).reshape(num_blocks, 8, 32) + # Dequantize final weights using scales and offsets + return factors * qs2 - offsets + +def dequantize_q4_k_gpu(data, device:str ="cuda"): + data = np.frombuffer(data, dtype=data.dtype) + device = torch.device(device) + # TODO: this and from_numpy in other functions will cause a warning saying that numpy is not writable, + # the best way to fix this is transfer ptr to KTransformersOps instead of Tensor. 
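+ # torch.from_numpy below shares memory with the read-only mmap slice, which
+ # is what triggers the "not writable" warning; it is benign here because the
+ # buffer is only read during dequantization.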
+ data = torch.from_numpy(data) + return KTransformersOps.dequantize_q4_k(data, 144, device) + +def dequantize_q5_k(data): + # C implementation + # https://github.com/ggerganov/ggml/blob/fca1caafea7de9fbd7efc733b9818f9cf2da3050/src/ggml-quants.c#L2129 + # C struct definition + # https://github.com/ggerganov/ggml/blob/fca1caafea7de9fbd7efc733b9818f9cf2da3050/src/ggml-quants.h#L138 + block_size = GGML_BLOCK_SIZES["Q5_K"] + num_blocks = len(data) // block_size + + data_f16 = np.frombuffer(data, dtype=np.float16).reshape(num_blocks, block_size // 2) + data_u8 = np.frombuffer(data, dtype=np.uint8).reshape(num_blocks, block_size) + + d = data_f16[:, 0].reshape(num_blocks, 1).astype(np.float32) + dmin = data_f16[:, 1].reshape(num_blocks, 1).astype(np.float32) + scales = data_u8[:, 4:16].reshape(num_blocks, 12, 1) + qh = data_u8[:, 16: 16 + 32].reshape(num_blocks, 32, 1) + qs = data_u8[:, 48: 48 + 128].reshape(num_blocks, 4, 32) + + bits = np.unpackbits(qh, axis=-1, bitorder="little") + + qs_hi_4 = qs >> 4 + qs_lo_4 = qs & 15 + + scales_lo_6 = scales[:, :8] & 63 + scales_hi_6 = scales[:, :8] >> 6 + scales_lo_4 = scales[:, 8:] & 15 + scales_hi_4 = scales[:, 8:] >> 4 + + m1 = dmin * scales_lo_6[:, 4] + m2 = dmin * scales_lo_6[:, 5] + m3 = dmin * scales_lo_6[:, 6] + m4 = dmin * scales_lo_6[:, 7] + m5 = dmin * (scales_hi_4[:, 0] | (scales_hi_6[:, 4] << 4)) + m6 = dmin * (scales_hi_4[:, 1] | (scales_hi_6[:, 5] << 4)) + m7 = dmin * (scales_hi_4[:, 2] | (scales_hi_6[:, 6] << 4)) + m8 = dmin * (scales_hi_4[:, 3] | (scales_hi_6[:, 7] << 4)) + + d1 = d * scales_lo_6[:, 0] + d2 = d * scales_lo_6[:, 1] + d3 = d * scales_lo_6[:, 2] + d4 = d * scales_lo_6[:, 3] + d5 = d * (scales_lo_4[:, 0] | (scales_hi_6[:, 0] << 4)) + d6 = d * (scales_lo_4[:, 1] | (scales_hi_6[:, 1] << 4)) + d7 = d * (scales_lo_4[:, 2] | (scales_hi_6[:, 2] << 4)) + d8 = d * (scales_lo_4[:, 3] | (scales_hi_6[:, 3] << 4)) + + return np.concatenate([ + d1 * (qs_lo_4[:, 0] + (bits[:, :, 0] << 4)) - m1, + d2 * (qs_hi_4[:, 0] + (bits[:, :, 1] << 4)) - m2, + d3 * (qs_lo_4[:, 1] + (bits[:, :, 2] << 4)) - m3, + d4 * (qs_hi_4[:, 1] + (bits[:, :, 3] << 4)) - m4, + d5 * (qs_lo_4[:, 2] + (bits[:, :, 4] << 4)) - m5, + d6 * (qs_hi_4[:, 2] + (bits[:, :, 5] << 4)) - m6, + d7 * (qs_lo_4[:, 3] + (bits[:, :, 6] << 4)) - m7, + d8 * (qs_hi_4[:, 3] + (bits[:, :, 7] << 4)) - m8, + ], axis=1) + +def dequantize_q5_k_gpu(data): + pass + + +def dequantize_q6_k(data): + # C implementation + # https://github.com/ggerganov/ggml/blob/fca1caafea7de9fbd7efc733b9818f9cf2da3050/src/ggml-quants.c#L2275 + # C struct definition + # https://github.com/ggerganov/ggml/blob/fca1caafea7de9fbd7efc733b9818f9cf2da3050/src/ggml-quants.h#L152 + block_size = GGML_BLOCK_SIZES["Q6_K"] + num_blocks = len(data) // block_size + + data_f16 = np.frombuffer(data, dtype=np.float16).reshape(num_blocks, block_size // 2) + data_u8 = np.frombuffer(data, dtype=np.uint8).reshape(num_blocks, block_size) + data_i8 = np.frombuffer(data, dtype=np.int8).reshape(num_blocks, block_size) + + scales = data_f16[:, -1].reshape(num_blocks, 1).astype(np.float32) + # TODO use uint8 and cast later? 
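+ # Q6_K block layout (210 bytes per 256 weights): 128 bytes of low 4-bit
+ # halves (ql), 64 bytes of high 2-bit halves (qh), 16 int8 group scales (sc),
+ # plus the fp16 super-block scale read above into `scales`.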
+ ql = data_u8[:, :128].astype(np.int16)
+ qh = data_u8[:, 128:192].astype(np.int16)
+ sc = data_i8[:, 192:208, np.newaxis].astype(np.float32)
+
+ # Unpack bits, subtraction requires signed data type
+ q1 = (ql[:, :32 ] & 0xF) | (((qh[:, :32] >> 0) & 3) << 4) - 32
+ q2 = (ql[:, 32:64 ] & 0xF) | (((qh[:, :32] >> 2) & 3) << 4) - 32
+ q3 = (ql[:, :32 ] >> 4) | (((qh[:, :32] >> 4) & 3) << 4) - 32
+ q4 = (ql[:, 32:64 ] >> 4) | (((qh[:, :32] >> 6) & 3) << 4) - 32
+ q5 = (ql[:, 64:96 ] & 0xF) | (((qh[:, 32:] >> 0) & 3) << 4) - 32
+ q6 = (ql[:, 96:128] & 0xF) | (((qh[:, 32:] >> 2) & 3) << 4) - 32
+ q7 = (ql[:, 64:96 ] >> 4) | (((qh[:, 32:] >> 4) & 3) << 4) - 32
+ q8 = (ql[:, 96:128] >> 4) | (((qh[:, 32:] >> 6) & 3) << 4) - 32
+
+ # Dequantize
+ return scales * np.concatenate([
+ sc[:, 0] * q1[:, :16],
+ sc[:, 1] * q1[:, 16:],
+ sc[:, 2] * q2[:, :16],
+ sc[:, 3] * q2[:, 16:],
+ sc[:, 4] * q3[:, :16],
+ sc[:, 5] * q3[:, 16:],
+ sc[:, 6] * q4[:, :16],
+ sc[:, 7] * q4[:, 16:],
+ sc[:, 8] * q5[:, :16],
+ sc[:, 9] * q5[:, 16:],
+ sc[:, 10] * q6[:, :16],
+ sc[:, 11] * q6[:, 16:],
+ sc[:, 12] * q7[:, :16],
+ sc[:, 13] * q7[:, 16:],
+ sc[:, 14] * q8[:, :16],
+ sc[:, 15] * q8[:, 16:],
+ ], axis=1)
+
+# @torch.jit.script
+def dequantize_q6_k_gpu(data: np.ndarray, device: str = "cuda"):
+ block_size = GGML_BLOCK_SIZES["Q6_K"]
+ device = torch.device(device)
+ num_blocks = len(data) // block_size
+ data = np.frombuffer(data, dtype=data.dtype)
+ data = torch.from_numpy(data)
+ return KTransformersOps.dequantize_q6_k(data, 210, device)
+
+def dequantize_q8_0(data):
+ # C struct definition
+ # https://github.com/ggerganov/ggml/blob/fca1caafea7de9fbd7efc733b9818f9cf2da3050/src/ggml-quants.h#L43
+ num_blocks = len(data) // GGML_BLOCK_SIZES["Q8_0"]
+
+ scales = np.frombuffer(data, dtype=np.float16).reshape(num_blocks, 1 + 16)[:, :1].astype(np.float32)
+ qs = np.frombuffer(data, dtype=np.int8).reshape(num_blocks, 2 + 32)[:, 2:]
+ return scales * qs
+
+def dequantize_q8_0_gpu(data, device: str = "cuda"):
+ # C struct definition
+ # https://github.com/ggerganov/ggml/blob/fca1caafea7de9fbd7efc733b9818f9cf2da3050/src/ggml-quants.h#L43
+ num_blocks = len(data) // GGML_BLOCK_SIZES["Q8_0"]
+ device = torch.device(device)
+ data = np.frombuffer(data, dtype=data.dtype)
+ data = torch.from_numpy(data)
+ return KTransformersOps.dequantize_q8_0(data, 34, device)
+
+def dequantize_f32(data):
+ return np.frombuffer(data, dtype=np.float32)
+
+def dequantize_f32_gpu(data, device):
+ data = np.frombuffer(data, dtype=np.float32)
+ res = torch.from_numpy(data)
+ res_gpu = torch.empty_like(res, device=device)
+ res_gpu.copy_(res)
+ return res_gpu
+
+def dequantize_f16(data):
+ return np.frombuffer(data, dtype=np.float16)
+
+def dequantize_f16_gpu(data, device):
+ data = np.frombuffer(data, dtype=np.float16)
+ res = torch.from_numpy(data)
+ res_gpu = torch.empty_like(res, device=device)
+ res_gpu.copy_(res)
+ return res_gpu
+
+GGML_DEQUANTIZE = {
+ "F32": dequantize_f32,
+ "F16": dequantize_f16,
+ "Q8_0": dequantize_q8_0,
+ "Q2_K": dequantize_q2_k,
+ "Q3_K": dequantize_q3_k,
+ "Q4_K": dequantize_q4_k,
+ "Q5_K": dequantize_q5_k,
+ "Q6_K": dequantize_q6_k,
+}
+
+GGML_DEQUANTIZE_GPU = {
+ "F32": dequantize_f32_gpu,
+ "F16": dequantize_f16_gpu,
+ "Q8_0": dequantize_q8_0_gpu,
+ "Q2_K": dequantize_q2_k_gpu,
+ "Q3_K": dequantize_q3_k_gpu,
+ "Q4_K": dequantize_q4_k_gpu,
+ "Q5_K": dequantize_q5_k_gpu,
+ "Q6_K": dequantize_q6_k_gpu,
+}
+
+def translate_name_to_gguf(name):
+ name = name.replace("lm_head.", "output.")
+ name = 
name.replace("model.embed_tokens.", "token_embd.") + name = name.replace("model.norm.", "output_norm.") + + name = name.replace("model.layers.", "blk.") + name = name.replace(".input_layernorm", ".attn_norm") + name = name.replace(".mlp.down_proj", ".ffn_down") + name = name.replace(".mlp.gate_proj", ".ffn_gate") + name = name.replace(".mlp.up_proj", ".ffn_up") + name = name.replace(".post_attention_layernorm", ".ffn_norm") + name = name.replace(".self_attn.q_proj", ".attn_q") + name = name.replace(".self_attn.k_proj", ".attn_k") + name = name.replace(".self_attn.v_proj", ".attn_v") + name = name.replace(".self_attn.o_proj", ".attn_output") + name = name.replace(".self_attn.qkv_proj", ".attn_qkv") + name = name.replace(".self_attn.kv_a_proj_with_mqa", ".attn_kv_a_mqa") + name = name.replace(".self_attn.kv_a_layernorm", ".attn_kv_a_norm") + name = name.replace(".self_attn.kv_b_proj", ".attn_kv_b") + name = name.replace(".self_attn.q_a_proj", ".attn_q_a") + name = name.replace(".self_attn.q_a_layernorm", ".attn_q_a_norm") + name = name.replace(".self_attn.q_b_proj", ".attn_q_b") + + name = name.replace(".shared_expert.", ".shared_experts.") + name = name.replace(".shared_expert_", ".shared_experts_") + name = name.replace(".gate_up_proj.", ".up_proj") + + name = name.replace(".mlp.shared_experts.down_proj", ".ffn_down_shexp") + name = name.replace(".mlp.gate", ".ffn_gate_inp") + name = name.replace(".mlp.shared_experts.gate_proj", ".ffn_gate_shexp") + name = name.replace(".mlp.shared_experts.up_proj", ".ffn_up_shexp") + name = name.replace(".mlp.shared_experts_gate", ".ffn_gate_inp_shexp") + name = name.replace(".mlp.experts", "") + name = name.replace(".mlp.experts.ffn_down_exps", ".ffn_down_exps") + name = name.replace(".mlp.experts.ffn_gate_exps", ".ffn_gate_exps") + name = name.replace(".mlp.experts.ffn_up_exps", ".ffn_up_exps") + + return name + +if __name__ == '__main__': + gguf_path = '/mnt/data/model/DeepSeek-Coder-V2-GGUF-WJH' + loader = GGUFLoader(gguf_path) + loader.load_gguf_tensor('token_embd.weight') diff --git a/ktransformers/util/textstream.py b/ktransformers/util/textstream.py new file mode 100644 index 0000000..eb30490 --- /dev/null +++ b/ktransformers/util/textstream.py @@ -0,0 +1,84 @@ +from typing import Any, List, Optional, Set +class TextStreamer: + + def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs): + self.tokenizer = tokenizer + self.skip_prompt = skip_prompt + self.decode_kwargs = decode_kwargs + + # variables used in the streaming process + self.token_cache = [] + self.print_len = 0 + self.next_tokens_are_prompt = True + + def reset(self): + self.token_cache = [] + self.print_len = 0 + + def put(self, value)->Optional[str]: + """ + Receives tokens, decodes them, and prints them to stdout as soon as they form entire words. + """ + if not isinstance(value,int): + raise ValueError("TextStreamer only supports batch size 1, and int type input") + + + if self.skip_prompt and self.next_tokens_are_prompt: + self.next_tokens_are_prompt = False + return None + + # Add the new token to the cache and decodes the entire thing. + self.token_cache.append(value) + text = self.tokenizer.decode(self.token_cache, skip_special_tokens=True,**self.decode_kwargs) + + # After the symbol for a new line, we flush the cache. + if text.endswith("\n"): + printable_text = text[self.print_len :] + self.reset() + # If the last token is a CJK character, we print the characters. 
+ elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
+ printable_text = text[self.print_len :]
+ self.print_len += len(printable_text)
+ # Otherwise, emit text up to the last space char (simple heuristic to avoid emitting incomplete words,
+ # which may change with the subsequent token -- there are probably smarter ways to do this!)
+ else:
+ printable_text = text[self.print_len : text.rfind(" ") + 1]
+ self.print_len += len(printable_text)
+ return printable_text
+
+ def end(self) -> Optional[str]:
+ """Flushes any remaining cache and returns the remaining text."""
+ # Flush the cache, if it exists
+ if len(self.token_cache) > 0:
+ text = self.tokenizer.decode(self.token_cache, skip_special_tokens=True, **self.decode_kwargs)
+ printable_text = text[self.print_len :]
+ self.reset()
+ else:
+ printable_text = ""
+
+ self.next_tokens_are_prompt = True
+ return printable_text
+
+ def _is_chinese_char(self, cp):
+ """Checks whether CP is the codepoint of a CJK character."""
+ # This defines a "chinese character" as anything in the CJK Unicode block:
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+ #
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+ # despite its name. The modern Korean Hangul alphabet is a different block,
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+ # space-separated words, so they are not treated specially and handled
+ # like all of the other languages.
+ if (
+ (cp >= 0x4E00 and cp <= 0x9FFF)
+ or (cp >= 0x3400 and cp <= 0x4DBF)
+ or (cp >= 0x20000 and cp <= 0x2A6DF)
+ or (cp >= 0x2A700 and cp <= 0x2B73F)
+ or (cp >= 0x2B740 and cp <= 0x2B81F)
+ or (cp >= 0x2B820 and cp <= 0x2CEAF)
+ or (cp >= 0xF900 and cp <= 0xFAFF)
+ or (cp >= 0x2F800 and cp <= 0x2FA1F)
+ ):
+ return True
+
+ return False
\ No newline at end of file
diff --git a/ktransformers/util/utils.py b/ktransformers/util/utils.py
new file mode 100644
index 0000000..7976e56
--- /dev/null
+++ b/ktransformers/util/utils.py
@@ -0,0 +1,185 @@
+#!/usr/bin/env python
+# coding=utf-8
+'''
+Description : weight loading and generation utilities
+Author : Boxin Zhang, Azure-Tang
+Version : 0.1.0
+Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
+'''
+import torch
+from torch import nn
+import itertools
+import time
+import enum
+from ktransformers.util.custom_gguf import translate_name_to_gguf
+from ktransformers.util.custom_gguf import GGUFLoader
+from ktransformers.operators import base_operator
+from ktransformers.models.custom_cache import StaticCache
+from ktransformers.util.cuda_graph_runner import CUDAGraphRunner
+from ktransformers.util.textstream import TextStreamer

+def set_module(model, submodule_key, module):
+ tokens = submodule_key.split('.')
+ sub_tokens = tokens[:-1]
+ cur_mod = model
+ for s in sub_tokens:
+ if hasattr(cur_mod, s):
+ cur_mod = getattr(cur_mod, s)
+ else: # nn.ModuleList or other int-indexed container
+ cur_mod = cur_mod[int(s)]
+ if hasattr(cur_mod, tokens[-1]):
+ setattr(cur_mod, tokens[-1], module)
+ else: # nn.ModuleList or other int-indexed container
+ cur_mod[int(tokens[-1])] = module
+
+def set_param(module: nn.Module, name: str, weights: torch.Tensor):
+
+ param = nn.parameter.Parameter(weights, requires_grad=False)
+ if isinstance(module, nn.Linear) and len(weights.shape) == 1:
+ param.unsqueeze_(0)
+ setattr(module, name, param)
+
+def load_cur_state_dict(module: nn.Module, gguf_loader: GGUFLoader, prefix: str = ""):
+ prefix = prefix.replace("orig_module.", "")
+ persistent_buffers = {k: v for k, v in module._buffers.items() if k not in module._non_persistent_buffers_set}
+ local_name_params = itertools.chain(module._parameters.items(), persistent_buffers.items())
+ local_state = {k: v for k, v in local_name_params if v is not None}
+ for name, param in local_state.items():
+ key = prefix + name
+ translated_key = translate_name_to_gguf(key)
+ print("default loading weights", key, translated_key)
+ if translated_key in gguf_loader.tensor_file_map:
+ target_dtype = torch.get_default_dtype()
+ device = "cpu" if "embd" in translated_key else "cuda"
+ weights = gguf_loader.load_gguf_tensor(translated_key, device = device).to(dtype = target_dtype)
+ set_param(module, name, weights)
+ del weights
+ else:
+ #print(load_config.tensor_file_map.keys())
+ raise Exception(f"can't find {translated_key} in GGUF file!")
+
+def load_weights(module:nn.Module, gguf_loader:GGUFLoader, prefix='', return_when_injected:bool = False, only_load_injected:bool = False):
+ # print(f"recursively loading weights {prefix},{return_when_injected=}, {only_load_injected=}")
+ if not isinstance(module, base_operator.BaseInjectedModule):
+ load_cur_state_dict(module, gguf_loader, prefix)
+ for name, child in module._modules.items():
+ load_weights(child, gguf_loader, prefix+name+".")
+ else:
+ module.load()
+
+def prefill_and_generate(model, tokenizer, inputs, max_new_tokens=10000):
+ import os
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
+ torch._dynamo.config.suppress_errors = True
+ batch_size, seq_length = inputs.shape
+ torch_device = inputs.device
+ tokens = []
+
+ def decode_one_tokens(cuda_graph_runner, cur_token, position_ids, cache_position, past_key_values):
+ logits = cuda_graph_runner(cur_token, position_ids, cache_position)
+ past_key_values.change_seq_length(1)
+ """
+ with torch.cuda.stream(custom_stream):
+ logits=model(cur_token,
+ position_ids=position_ids,
+ cache_position=cache_position,
+ past_key_values=past_key_values,
+ return_dict=False, use_cache=True)[0]
+ #"""
+ torch.cuda.synchronize()
+ #print(logits)
+ next_token_scores = logits_warper(inputs, logits[:, -1, :])
+ if generation_config.do_sample:
+ probs = nn.functional.softmax(next_token_scores, dim=-1)
+ next_token = torch.multinomial(probs, num_samples=1).squeeze(1)
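+ # logits_warper applies the top_k/top_p/temperature settings configured
+ # below in prefill_and_generate, so multinomial() samples from the
+ # filtered distribution rather than the raw logits.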
+ else: + next_token = torch.argmax(next_token_scores, dim=-1) + return next_token + + with torch.no_grad(): + stream = TextStreamer(tokenizer) + past_key_values = StaticCache( + config = model.config, max_batch_size = 1, max_cache_len = seq_length + max_new_tokens, device = torch_device, dtype = model.dtype + ) + cache_position = torch.arange(seq_length, device=torch_device) + generated_ids = torch.zeros( + batch_size, seq_length + max_new_tokens + 1, dtype=torch.int, device=torch_device + ) + generated_ids[:, cache_position] = inputs.to(torch_device).to(torch.int) + past_key_values.cur_idx=cache_position + start_time = time.time() + #custom_stream = torch.cuda.Stream() + + inputs_embeds = model.model.embed_tokens(inputs.to("cpu")).to("cuda") + logits = model( + inputs_embeds = inputs_embeds, cache_position=cache_position, past_key_values=past_key_values, return_dict=False, use_cache=True + )[0][:,-1,:].unsqueeze(0).clone() + generation_config, model_kwargs = model._prepare_generation_config( + None, max_length=max_new_tokens, + do_sample=True, top_k=5, top_p=0.85, temperature=0.1 # change this to modify generate config + ) + try: # transformers==4.43 + logits_warper = ( + model._get_logits_warper(generation_config,device=inputs.device) if generation_config.do_sample else None + ) + except: + logits_warper = ( + model._get_logits_warper(generation_config) if generation_config.do_sample else None + ) + next_token_scores = logits_warper(inputs, logits[:, -1, :]) + if generation_config.do_sample: + probs = nn.functional.softmax(next_token_scores, dim=-1) + next_token = torch.multinomial(probs, num_samples=1).squeeze(1) + else: + next_token = torch.argmax(next_token_scores, dim=-1) + first_token_time = time.time() - start_time + + prefill_count = seq_length + prefill_time = first_token_time + + print(stream.put(next_token.item()), end="", flush=True) + generated_ids[:, seq_length] = next_token + tokens.append(next_token) + inputs = torch.cat((inputs, next_token.unsqueeze(0)), dim=-1) + cache_position = torch.tensor([seq_length], device=torch_device) + position_ids = cache_position.unsqueeze(0) + seq_length += 1 + + cuda_graph_runner = CUDAGraphRunner() + cuda_graph_runner.capture(model, next_token.unsqueeze(0), position_ids, cache_position, past_key_values, return_dict=False, use_cache=True) + start_time = time.time() + for _ in range(1, max_new_tokens): + next_token = decode_one_tokens(cuda_graph_runner, next_token.unsqueeze(0), position_ids, cache_position, past_key_values) + inputs = torch.cat((inputs, next_token.unsqueeze(0)), dim=-1) + generated_ids[:, cache_position] = next_token.int() + tokens.append(next_token.int()) + seq_length += 1 + + if next_token[0].item() == tokenizer.eos_token_id: + print(stream.end(), end="", flush=True) + break + else: + print(stream.put(next_token.item()), end="", flush=True) + cache_position += 1 + position_ids = cache_position.unsqueeze(0) + + total_time = time.time() - start_time + tokens_generated = len(tokens) + tokens_per_second = tokens_generated / total_time + + print("") + + print(f"prompt eval count: {prefill_count} token(s)") + print(f"prompt eval duration: {prefill_time}s") + print(f"prompt eval rate: {prefill_count/prefill_time} tokens/s") + print(f"eval count: {tokens_generated} token(s)") + print(f"eval duration: {total_time}s") + print(f"eval rate: {tokens_per_second} tokens/s") + + return tokens + +class InferenceState(enum.Enum): + UNLOAD = 0 + PREFILL = 1 + GENERATE = 2 + RESTORE = 3 diff --git a/ktransformers/website/.browserslistrc 
b/ktransformers/website/.browserslistrc new file mode 100644 index 0000000..dc3bc09 --- /dev/null +++ b/ktransformers/website/.browserslistrc @@ -0,0 +1,4 @@ +> 1% +last 2 versions +not dead +not ie 11 diff --git a/ktransformers/website/.eslintrc.js b/ktransformers/website/.eslintrc.js new file mode 100644 index 0000000..02f4dfd --- /dev/null +++ b/ktransformers/website/.eslintrc.js @@ -0,0 +1,29 @@ +module.exports = { + root: true, + env: { + node: true + }, + 'extends': [ + 'plugin:vue/vue3-essential', + 'eslint:recommended', + '@vue/typescript/recommended' + ], + parserOptions: { + ecmaVersion: 2020 + }, + rules: { + 'no-console': process.env.NODE_ENV === 'production' ? 'warn' : 'off', + 'no-debugger': process.env.NODE_ENV === 'production' ? 'warn' : 'off' + }, + overrides: [ + { + files: [ + '**/__tests__/*.{j,t}s?(x)', + '**/tests/unit/**/*.spec.{j,t}s?(x)' + ], + env: { + jest: true + } + } + ] +} diff --git a/ktransformers/website/.gitignore b/ktransformers/website/.gitignore new file mode 100644 index 0000000..403adbc --- /dev/null +++ b/ktransformers/website/.gitignore @@ -0,0 +1,23 @@ +.DS_Store +node_modules +/dist + + +# local env files +.env.local +.env.*.local + +# Log files +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* + +# Editor directories and files +.idea +.vscode +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? diff --git a/ktransformers/website/README.md b/ktransformers/website/README.md new file mode 100644 index 0000000..67ebaaf --- /dev/null +++ b/ktransformers/website/README.md @@ -0,0 +1,29 @@ +# + +## Project setup +``` +npm install +``` + +### Compiles and hot-reloads for development +``` +npm run serve +``` + +### Compiles and minifies for production +``` +npm run build +``` + +### Run your unit tests +``` +npm run test:unit +``` + +### Lints and fixes files +``` +npm run lint +``` + +### Customize configuration +See [Configuration Reference](https://cli.vuejs.org/config/). 
diff --git a/ktransformers/website/config.d.ts b/ktransformers/website/config.d.ts new file mode 100644 index 0000000..27abf4a --- /dev/null +++ b/ktransformers/website/config.d.ts @@ -0,0 +1,7 @@ +declare module '*.js' { + const config: { + apiUrl: string; + port:number; + }; + export { config }; + } \ No newline at end of file diff --git a/ktransformers/website/jest.config.js b/ktransformers/website/jest.config.js new file mode 100644 index 0000000..46d375d --- /dev/null +++ b/ktransformers/website/jest.config.js @@ -0,0 +1,3 @@ +module.exports = { + preset: '@vue/cli-plugin-unit-jest/presets/typescript' +} diff --git a/ktransformers/website/package-lock.json b/ktransformers/website/package-lock.json new file mode 100644 index 0000000..7d77ec1 --- /dev/null +++ b/ktransformers/website/package-lock.json @@ -0,0 +1,24730 @@ +{ + "name": "website", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "dependencies": { + "@types/pdfjs-dist": "^2.10.378", + "@types/websocket": "^1.0.10", + "@vue/cli": "^5.0.8", + "ant-design-vue": "^4.2.1", + "apexcharts": "^3.49.1", + "axios": "^1.7.0", + "axios-extensions": "^3.1.6", + "better-scroll": "^2.5.1", + "element-plus": "^2.7.3", + "marked": "^12.0.2", + "marked-highlight": "^2.1.1", + "pdf-lib": "^1.17.1", + "pdfobject": "^2.3.0", + "v-clipboard": "^3.0.0-next.1", + "vue": "^3.4.27", + "vue-i18n": "^9.13.1", + "vue-pdf": "^4.3.0", + "vue-router": "^4.0.3", + "vue3-apexcharts": "^1.5.3", + "vuex": "^4.0.0", + "webpack": "^5.91.0", + "webpack-cli": "^5.1.4", + "websocket": "^1.0.35" + }, + "devDependencies": { + "@types/jest": "^27.0.1", + "@types/pdfobject": "^2.2.5", + "@typescript-eslint/eslint-plugin": "^5.4.0", + "@typescript-eslint/parser": "^5.4.0", + "@vue/cli-plugin-eslint": "~5.0.0", + "@vue/cli-plugin-router": "~5.0.0", + "@vue/cli-plugin-typescript": "~5.0.0", + "@vue/cli-plugin-unit-jest": "~5.0.0", + "@vue/cli-plugin-vuex": "~5.0.0", + "@vue/cli-service": "~5.0.0", + "@vue/eslint-config-typescript": "^9.1.0", + "@vue/test-utils": "^2.0.0-0", + "@vue/vue3-jest": "^27.0.0-alpha.1", + "babel-jest": "^27.0.6", + "eslint": "^7.32.0", + "eslint-plugin-vue": "^8.0.3", + "jest": "^27.0.5", + "stylus": "^0.55.0", + "stylus-loader": "^6.1.0", + "ts-jest": "^27.0.4", + "typescript": "~4.5.5" + } + }, + "node_modules/@achrinza/node-ipc": { + "version": "9.2.8", + "resolved": "https://registry.npmmirror.com/@achrinza/node-ipc/-/node-ipc-9.2.8.tgz", + "integrity": "sha512-DSzEEkbMYbAUVlhy7fg+BzccoRuSQzqHbIPGxGv19OJ2WKwS3/9ChAnQcII4g+GujcHhyJ8BUuOVAx/S5uAfQg==", + "dependencies": { + "@node-ipc/js-queue": "2.0.3", + "event-pubsub": "4.3.0", + "js-message": "1.0.7" + }, + "engines": { + "node": "8 || 9 || 10 || 11 || 12 || 13 || 14 || 15 || 16 || 17 || 18 || 19 || 20 || 21" + } + }, + "node_modules/@akryum/winattr": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/@akryum/winattr/-/winattr-3.0.0.tgz", + "integrity": "sha512-t4WmWoGV9gyzypwG3y3JlcK2t8fKLtvzBA7xEoFTj9SMPvOuLsf13uh4ikK0RRaaa9RPPWLgFUdOyIRaQvCpwQ==", + "dependencies": { + "fswin": "^2.17.1227" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@ampproject/remapping": { + "version": "2.3.0", + "resolved": "https://registry.npmmirror.com/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, 
+ "node_modules/@ant-design/colors": { + "version": "6.0.0", + "resolved": "https://registry.npmmirror.com/@ant-design/colors/-/colors-6.0.0.tgz", + "integrity": "sha512-qAZRvPzfdWHtfameEGP2Qvuf838NhergR35o+EuVyB5XvSA98xod5r4utvi4TJ3ywmevm290g9nsCG5MryrdWQ==", + "dependencies": { + "@ctrl/tinycolor": "^3.4.0" + } + }, + "node_modules/@ant-design/icons-svg": { + "version": "4.4.2", + "resolved": "https://registry.npmmirror.com/@ant-design/icons-svg/-/icons-svg-4.4.2.tgz", + "integrity": "sha512-vHbT+zJEVzllwP+CM+ul7reTEfBR0vgxFe7+lREAsAA7YGsYpboiq2sQNeQeRvh09GfQgs/GyFEvZpJ9cLXpXA==" + }, + "node_modules/@ant-design/icons-vue": { + "version": "7.0.1", + "resolved": "https://registry.npmmirror.com/@ant-design/icons-vue/-/icons-vue-7.0.1.tgz", + "integrity": "sha512-eCqY2unfZK6Fe02AwFlDHLfoyEFreP6rBwAZMIJ1LugmfMiVgwWDYlp1YsRugaPtICYOabV1iWxXdP12u9U43Q==", + "dependencies": { + "@ant-design/colors": "^6.0.0", + "@ant-design/icons-svg": "^4.2.1" + }, + "peerDependencies": { + "vue": ">=3.0.3" + } + }, + "node_modules/@apollo/protobufjs": { + "version": "1.2.7", + "resolved": "https://registry.npmmirror.com/@apollo/protobufjs/-/protobufjs-1.2.7.tgz", + "integrity": "sha512-Lahx5zntHPZia35myYDBRuF58tlwPskwHc5CWBZC/4bMKB6siTBWwtMrkqXcsNwQiFSzSx5hKdRPUmemrEp3Gg==", + "hasInstallScript": true, + "dependencies": { + "@protobufjs/aspromise": "^1.1.2", + "@protobufjs/base64": "^1.1.2", + "@protobufjs/codegen": "^2.0.4", + "@protobufjs/eventemitter": "^1.1.0", + "@protobufjs/fetch": "^1.1.0", + "@protobufjs/float": "^1.0.2", + "@protobufjs/inquire": "^1.1.0", + "@protobufjs/path": "^1.1.2", + "@protobufjs/pool": "^1.1.0", + "@protobufjs/utf8": "^1.1.0", + "@types/long": "^4.0.0", + "long": "^4.0.0" + }, + "bin": { + "apollo-pbjs": "bin/pbjs", + "apollo-pbts": "bin/pbts" + } + }, + "node_modules/@apollo/usage-reporting-protobuf": { + "version": "4.1.1", + "resolved": "https://registry.npmmirror.com/@apollo/usage-reporting-protobuf/-/usage-reporting-protobuf-4.1.1.tgz", + "integrity": "sha512-u40dIUePHaSKVshcedO7Wp+mPiZsaU6xjv9J+VyxpoU/zL6Jle+9zWeG98tr/+SZ0nZ4OXhrbb8SNr0rAPpIDA==", + "dependencies": { + "@apollo/protobufjs": "1.2.7" + } + }, + "node_modules/@apollo/utils.dropunuseddefinitions": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/@apollo/utils.dropunuseddefinitions/-/utils.dropunuseddefinitions-1.1.0.tgz", + "integrity": "sha512-jU1XjMr6ec9pPoL+BFWzEPW7VHHulVdGKMkPAMiCigpVIT11VmCbnij0bWob8uS3ODJ65tZLYKAh/55vLw2rbg==", + "engines": { + "node": ">=12.13.0" + }, + "peerDependencies": { + "graphql": "14.x || 15.x || 16.x" + } + }, + "node_modules/@apollo/utils.keyvaluecache": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/@apollo/utils.keyvaluecache/-/utils.keyvaluecache-1.0.2.tgz", + "integrity": "sha512-p7PVdLPMnPzmXSQVEsy27cYEjVON+SH/Wb7COyW3rQN8+wJgT1nv9jZouYtztWW8ZgTkii5T6tC9qfoDREd4mg==", + "dependencies": { + "@apollo/utils.logger": "^1.0.0", + "lru-cache": "7.10.1 - 7.13.1" + } + }, + "node_modules/@apollo/utils.keyvaluecache/node_modules/lru-cache": { + "version": "7.13.1", + "resolved": "https://registry.npmmirror.com/lru-cache/-/lru-cache-7.13.1.tgz", + "integrity": "sha512-CHqbAq7NFlW3RSnoWXLJBxCWaZVBrfa9UEHId2M3AW8iEBurbqduNexEUCGc3SHc6iCYXNJCDi903LajSVAEPQ==", + "engines": { + "node": ">=12" + } + }, + "node_modules/@apollo/utils.logger": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/@apollo/utils.logger/-/utils.logger-1.0.1.tgz", + "integrity": 
"sha512-XdlzoY7fYNK4OIcvMD2G94RoFZbzTQaNP0jozmqqMudmaGo2I/2Jx71xlDJ801mWA/mbYRihyaw6KJii7k5RVA==" + }, + "node_modules/@apollo/utils.printwithreducedwhitespace": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/@apollo/utils.printwithreducedwhitespace/-/utils.printwithreducedwhitespace-1.1.0.tgz", + "integrity": "sha512-GfFSkAv3n1toDZ4V6u2d7L4xMwLA+lv+6hqXicMN9KELSJ9yy9RzuEXaX73c/Ry+GzRsBy/fdSUGayGqdHfT2Q==", + "engines": { + "node": ">=12.13.0" + }, + "peerDependencies": { + "graphql": "14.x || 15.x || 16.x" + } + }, + "node_modules/@apollo/utils.removealiases": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/@apollo/utils.removealiases/-/utils.removealiases-1.0.0.tgz", + "integrity": "sha512-6cM8sEOJW2LaGjL/0vHV0GtRaSekrPQR4DiywaApQlL9EdROASZU5PsQibe2MWeZCOhNrPRuHh4wDMwPsWTn8A==", + "engines": { + "node": ">=12.13.0" + }, + "peerDependencies": { + "graphql": "14.x || 15.x || 16.x" + } + }, + "node_modules/@apollo/utils.sortast": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/@apollo/utils.sortast/-/utils.sortast-1.1.0.tgz", + "integrity": "sha512-VPlTsmUnOwzPK5yGZENN069y6uUHgeiSlpEhRnLFYwYNoJHsuJq2vXVwIaSmts015WTPa2fpz1inkLYByeuRQA==", + "dependencies": { + "lodash.sortby": "^4.7.0" + }, + "engines": { + "node": ">=12.13.0" + }, + "peerDependencies": { + "graphql": "14.x || 15.x || 16.x" + } + }, + "node_modules/@apollo/utils.stripsensitiveliterals": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/@apollo/utils.stripsensitiveliterals/-/utils.stripsensitiveliterals-1.2.0.tgz", + "integrity": "sha512-E41rDUzkz/cdikM5147d8nfCFVKovXxKBcjvLEQ7bjZm/cg9zEcXvS6vFY8ugTubI3fn6zoqo0CyU8zT+BGP9w==", + "engines": { + "node": ">=12.13.0" + }, + "peerDependencies": { + "graphql": "14.x || 15.x || 16.x" + } + }, + "node_modules/@apollo/utils.usagereporting": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/@apollo/utils.usagereporting/-/utils.usagereporting-1.0.1.tgz", + "integrity": "sha512-6dk+0hZlnDbahDBB2mP/PZ5ybrtCJdLMbeNJD+TJpKyZmSY6bA3SjI8Cr2EM9QA+AdziywuWg+SgbWUF3/zQqQ==", + "dependencies": { + "@apollo/usage-reporting-protobuf": "^4.0.0", + "@apollo/utils.dropunuseddefinitions": "^1.1.0", + "@apollo/utils.printwithreducedwhitespace": "^1.1.0", + "@apollo/utils.removealiases": "1.0.0", + "@apollo/utils.sortast": "^1.1.0", + "@apollo/utils.stripsensitiveliterals": "^1.2.0" + }, + "engines": { + "node": ">=12.13.0" + }, + "peerDependencies": { + "graphql": "14.x || 15.x || 16.x" + } + }, + "node_modules/@apollographql/apollo-tools": { + "version": "0.5.4", + "resolved": "https://registry.npmmirror.com/@apollographql/apollo-tools/-/apollo-tools-0.5.4.tgz", + "integrity": "sha512-shM3q7rUbNyXVVRkQJQseXv6bnYM3BUma/eZhwXR4xsuM+bqWnJKvW7SAfRjP7LuSCocrexa5AXhjjawNHrIlw==", + "engines": { + "node": ">=8", + "npm": ">=6" + }, + "peerDependencies": { + "graphql": "^14.2.1 || ^15.0.0 || ^16.0.0" + } + }, + "node_modules/@apollographql/graphql-playground-html": { + "version": "1.6.29", + "resolved": "https://registry.npmmirror.com/@apollographql/graphql-playground-html/-/graphql-playground-html-1.6.29.tgz", + "integrity": "sha512-xCcXpoz52rI4ksJSdOCxeOCn2DLocxwHf9dVT/Q90Pte1LX+LY+91SFtJF3KXVHH8kEin+g1KKCQPKBjZJfWNA==", + "dependencies": { + "xss": "^1.0.8" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.24.2", + "resolved": "https://registry.npmmirror.com/@babel/code-frame/-/code-frame-7.24.2.tgz", + "integrity": 
"sha512-y5+tLQyV8pg3fsiln67BVLD1P13Eg4lh5RW9mF0zUuvLrv9uIQ4MCL+CRT+FTsBlBjcIan6PGsLcBN0m3ClUyQ==", + "dependencies": { + "@babel/highlight": "^7.24.2", + "picocolors": "^1.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.24.4", + "resolved": "https://registry.npmmirror.com/@babel/compat-data/-/compat-data-7.24.4.tgz", + "integrity": "sha512-vg8Gih2MLK+kOkHJp4gBEIkyaIi00jgWot2D9QOmmfLC8jINSOzmCLta6Bvz/JSBCqnegV0L80jhxkol5GWNfQ==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/core/-/core-7.24.5.tgz", + "integrity": "sha512-tVQRucExLQ02Boi4vdPp49svNGcfL2GhdTCT9aldhXgCJVAI21EtRfBettiuLUwce/7r6bFdgs6JFkcdTiFttA==", + "dependencies": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.24.2", + "@babel/generator": "^7.24.5", + "@babel/helper-compilation-targets": "^7.23.6", + "@babel/helper-module-transforms": "^7.24.5", + "@babel/helpers": "^7.24.5", + "@babel/parser": "^7.24.5", + "@babel/template": "^7.24.0", + "@babel/traverse": "^7.24.5", + "@babel/types": "^7.24.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/generator/-/generator-7.24.5.tgz", + "integrity": "sha512-x32i4hEXvr+iI0NEoEfDKzlemF8AmtOP8CcrRaEcpzysWuoEb1KknpcvMsHKPONoKZiDuItklgWhB18xEhr9PA==", + "dependencies": { + "@babel/types": "^7.24.5", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", + "jsesc": "^2.5.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-annotate-as-pure": { + "version": "7.22.5", + "resolved": "https://registry.npmmirror.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.22.5.tgz", + "integrity": "sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { + "version": "7.22.15", + "resolved": "https://registry.npmmirror.com/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.22.15.tgz", + "integrity": "sha512-QkBXwGgaoC2GtGZRoma6kv7Szfv06khvhFav67ZExau2RaXzy8MpHSMO2PNoP2XtmQphJQRHFfg77Bq731Yizw==", + "dependencies": { + "@babel/types": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.23.6", + "resolved": "https://registry.npmmirror.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.23.6.tgz", + "integrity": "sha512-9JB548GZoQVmzrFgp8o7KxdgkTGm6xs9DW0o/Pim72UDjzr5ObUQ6ZzYPqA+g9OTS2bBQoctLJrky0RDCAWRgQ==", + "dependencies": { + "@babel/compat-data": "^7.23.5", + "@babel/helper-validator-option": "^7.23.5", + "browserslist": "^4.22.2", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-create-class-features-plugin": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.24.5.tgz", + "integrity": 
"sha512-uRc4Cv8UQWnE4NXlYTIIdM7wfFkOqlFztcC/gVXDKohKoVB3OyonfelUBaJzSwpBntZ2KYGF/9S7asCHsXwW6g==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-member-expression-to-functions": "^7.24.5", + "@babel/helper-optimise-call-expression": "^7.22.5", + "@babel/helper-replace-supers": "^7.24.1", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.24.5", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin": { + "version": "7.22.15", + "resolved": "https://registry.npmmirror.com/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.22.15.tgz", + "integrity": "sha512-29FkPLFjn4TPEa3RE7GpW+qbE8tlsu3jntNYNfcGsc49LphF1PQIiD+vMZ1z1xVOKt+93khA9tc2JBs3kBjA7w==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "regexpu-core": "^5.3.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-define-polyfill-provider": { + "version": "0.6.2", + "resolved": "https://registry.npmmirror.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.2.tgz", + "integrity": "sha512-LV76g+C502biUK6AyZ3LK10vDpDyCzZnhZFXkH1L75zHPj68+qc8Zfpx2th+gzwA2MzyK+1g/3EPl62yFnVttQ==", + "dependencies": { + "@babel/helper-compilation-targets": "^7.22.6", + "@babel/helper-plugin-utils": "^7.22.5", + "debug": "^4.1.1", + "lodash.debounce": "^4.0.8", + "resolve": "^1.14.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/helper-environment-visitor": { + "version": "7.22.20", + "resolved": "https://registry.npmmirror.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-function-name": { + "version": "7.23.0", + "resolved": "https://registry.npmmirror.com/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", + "dependencies": { + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-hoist-variables": { + "version": "7.22.5", + "resolved": "https://registry.npmmirror.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-member-expression-to-functions": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.24.5.tgz", + "integrity": "sha512-4owRteeihKWKamtqg4JmWSsEZU445xpFRXPEwp44HbgbxdWlUV1b4Agg4lkA806Lil5XM/e+FJyS0vj5T6vmcA==", + "dependencies": { + "@babel/types": "^7.24.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.24.3", + "resolved": 
"https://registry.npmmirror.com/@babel/helper-module-imports/-/helper-module-imports-7.24.3.tgz", + "integrity": "sha512-viKb0F9f2s0BCS22QSF308z/+1YWKV/76mwt61NBzS5izMzDPwdq1pTrzf+Li3npBWX9KdQbkeCt1jSAM7lZqg==", + "dependencies": { + "@babel/types": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/helper-module-transforms/-/helper-module-transforms-7.24.5.tgz", + "integrity": "sha512-9GxeY8c2d2mdQUP1Dye0ks3VDyIMS98kt/llQ2nUId8IsWqTF0l1LkSX0/uP7l7MCDrzXS009Hyhe2gzTiGW8A==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-module-imports": "^7.24.3", + "@babel/helper-simple-access": "^7.24.5", + "@babel/helper-split-export-declaration": "^7.24.5", + "@babel/helper-validator-identifier": "^7.24.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-optimise-call-expression": { + "version": "7.22.5", + "resolved": "https://registry.npmmirror.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.22.5.tgz", + "integrity": "sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.5.tgz", + "integrity": "sha512-xjNLDopRzW2o6ba0gKbkZq5YWEBaK3PCyTOY1K2P/O07LGMhMqlMXPxwN4S5/RhWuCobT8z0jrlKGlYmeR1OhQ==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-remap-async-to-generator": { + "version": "7.22.20", + "resolved": "https://registry.npmmirror.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.22.20.tgz", + "integrity": "sha512-pBGyV4uBqOns+0UvhsTO8qgl8hO89PmiDYv+/COyp1aeMcmfrfruz+/nCMFiYyFF/Knn0yfrC85ZzNFjembFTw==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-wrap-function": "^7.22.20" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-replace-supers": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/helper-replace-supers/-/helper-replace-supers-7.24.1.tgz", + "integrity": "sha512-QCR1UqC9BzG5vZl8BMicmZ28RuUBnHhAMddD8yHFHDRH9lLTZ9uUPehX8ctVPT8l0TKblJidqcgUUKGVrePleQ==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-member-expression-to-functions": "^7.23.0", + "@babel/helper-optimise-call-expression": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-simple-access": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/helper-simple-access/-/helper-simple-access-7.24.5.tgz", + "integrity": "sha512-uH3Hmf5q5n7n8mz7arjUlDOCbttY/DW4DYhE6FUsjKJ/oYC1kQQUvwEQWxRwUpX9qQKRXeqLwWxrqilMrf32sQ==", + "dependencies": { + "@babel/types": "^7.24.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-skip-transparent-expression-wrappers": { + "version": "7.22.5", + "resolved": 
"https://registry.npmmirror.com/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz", + "integrity": "sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-split-export-declaration": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.24.5.tgz", + "integrity": "sha512-5CHncttXohrHk8GWOFCcCl4oRD9fKosWlIRgWm4ql9VYioKm52Mk2xsmoohvm7f3JoiLSM5ZgJuRaf5QZZYd3Q==", + "dependencies": { + "@babel/types": "^7.24.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/helper-string-parser/-/helper-string-parser-7.24.1.tgz", + "integrity": "sha512-2ofRCjnnA9y+wk8b9IAREroeUP02KHp431N2mhKniy2yKIDKpbrHv9eXwm8cBeWQYcJmzv5qKCu65P47eCF7CQ==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.5.tgz", + "integrity": "sha512-3q93SSKX2TWCG30M2G2kwaKeTYgEUp5Snjuj8qm729SObL6nbtUldAi37qbxkD5gg3xnBio+f9nqpSepGZMvxA==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.23.5", + "resolved": "https://registry.npmmirror.com/@babel/helper-validator-option/-/helper-validator-option-7.23.5.tgz", + "integrity": "sha512-85ttAOMLsr53VgXkTbkx8oA6YTfT4q7/HzXSLEYmjcSTJPMPQtvq1BD79Byep5xMUYbGRzEpDsjUf3dyp54IKw==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-wrap-function": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/helper-wrap-function/-/helper-wrap-function-7.24.5.tgz", + "integrity": "sha512-/xxzuNvgRl4/HLNKvnFwdhdgN3cpLxgLROeLDl83Yx0AJ1SGvq1ak0OszTOjDfiB8Vx03eJbeDWh9r+jCCWttw==", + "dependencies": { + "@babel/helper-function-name": "^7.23.0", + "@babel/template": "^7.24.0", + "@babel/types": "^7.24.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/helpers/-/helpers-7.24.5.tgz", + "integrity": "sha512-CiQmBMMpMQHwM5m01YnrM6imUG1ebgYJ+fAIW4FZe6m4qHTPaRHti+R8cggAwkdz4oXhtO4/K9JWlh+8hIfR2Q==", + "dependencies": { + "@babel/template": "^7.24.0", + "@babel/traverse": "^7.24.5", + "@babel/types": "^7.24.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/highlight/-/highlight-7.24.5.tgz", + "integrity": "sha512-8lLmua6AVh/8SLJRRVD6V8p73Hir9w5mJrhE+IPpILG31KKlI9iz5zmBYKcWPS59qSfgP9RaSBQSHHE81WKuEw==", + "dependencies": { + "@babel/helper-validator-identifier": "^7.24.5", + "chalk": "^2.4.2", + "js-tokens": "^4.0.0", + "picocolors": "^1.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/parser/-/parser-7.24.5.tgz", + "integrity": "sha512-EOv5IK8arwh3LI47dz1b0tKUb/1uhHAnHJOrjgtQMIpu1uXd9mlFrJg9IUgGUgZ41Ch0K8REPTYpO7B76b4vJg==", + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + 
"node_modules/@babel/plugin-bugfix-firefox-class-in-computed-class-key": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.24.5.tgz", + "integrity": "sha512-LdXRi1wEMTrHVR4Zc9F8OewC3vdm5h4QB6L71zy6StmYeqGi1b3ttIO8UC+BfZKcH9jdr4aI249rBkm+3+YvHw==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-plugin-utils": "^7.24.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.24.1.tgz", + "integrity": "sha512-y4HqEnkelJIOQGd+3g1bTeKsA5c6qM7eOn7VggGVbBc0y8MLSKHacwcIE2PplNlQSj0PqS9rrXL/nkPVK+kUNg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.24.1.tgz", + "integrity": "sha512-Hj791Ii4ci8HqnaKHAlLNs+zaLXb0EzSDhiAWp5VNlyvCNymYfacs64pxTxbH1znW/NcArSmwpmG9IKE/TUVVQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/plugin-transform-optional-chaining": "^7.24.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.13.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.24.1.tgz", + "integrity": "sha512-m9m/fXsXLiHfwdgydIFnpk+7jlVbnvlK5B2EKiPdLUb6WX654ZaaEWJUjk8TftRbZpK0XibovlLWX4KIZhV6jw==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-proposal-class-properties": { + "version": "7.18.6", + "resolved": "https://registry.npmmirror.com/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.18.6.tgz", + "integrity": "sha512-cumfXOF0+nzZrrN8Rf0t7M+tF6sZc7vhQwYQck9q1/5w2OExlD+b4v4RpMJFaV1Z7WcDRgO6FqvxqxGlwo+RHQ==", + "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. 
Please use @babel/plugin-transform-class-properties instead.", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-proposal-nullish-coalescing-operator": { + "version": "7.18.6", + "resolved": "https://registry.npmmirror.com/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.18.6.tgz", + "integrity": "sha512-wQxQzxYeJqHcfppzBDnm1yAY0jSRkUXR2z8RePZYrKwMKgMlE8+Z6LUno+bd6LvbGh8Gltvy74+9pIYkr+XkKA==", + "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-nullish-coalescing-operator instead.", + "dependencies": { + "@babel/helper-plugin-utils": "^7.18.6", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-proposal-optional-chaining": { + "version": "7.21.0", + "resolved": "https://registry.npmmirror.com/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.21.0.tgz", + "integrity": "sha512-p4zeefM72gpmEe2fkUr/OnOXpWEf8nAgk7ZYVqqfFiyIG7oFfVZcCrU64hWn5xp4tQ9LkV4bTIa5rD0KANpKNA==", + "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-optional-chaining instead.", + "dependencies": { + "@babel/helper-plugin-utils": "^7.20.2", + "@babel/helper-skip-transparent-expression-wrappers": "^7.20.0", + "@babel/plugin-syntax-optional-chaining": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-proposal-private-property-in-object": { + "version": "7.21.0-placeholder-for-preset-env.2", + "resolved": "https://registry.npmmirror.com/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", + "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-bigint": { + "version": "7.8.3", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz", + "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": 
"sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-dynamic-import": { + "version": "7.8.3", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", + "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-export-namespace-from": { + "version": "7.8.3", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", + "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.3" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-flow": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-flow/-/plugin-syntax-flow-7.24.1.tgz", + "integrity": "sha512-sxi2kLTI5DeW5vDtMUsk4mTPwvlUDbjOnoWayhynCwrw4QXRld4QEYwqzY8JmQXaJUtgUuCIurtSRH5sn4c7mA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-assertions": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.24.1.tgz", + "integrity": "sha512-IuwnI5XnuF189t91XbxmXeCDz3qs6iDRO7GJ++wcfgeXNs/8FmIlKcpDSXNVyuLQxlwvskmI3Ct73wUODkJBlQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.24.1.tgz", + "integrity": "sha512-zhQTMH0X2nVLnb04tz+s7AMuasX8U0FnpE+nHTOhSOINjWMnopoZTxtIKsd45n4GQ/HIZLyfIpoul8e2m0DnRA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.24.1.tgz", + "integrity": "sha512-2eCtxZXf+kbkMIsXS4poTvT4Yu5rXiRa+9xGVT56raghjmBTKMpFNc9R4IDiB4emao9eO22Ox7CxuJG7BgExqA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": 
"sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.24.1.tgz", + "integrity": "sha512-Yhnmvy5HZEnHUty6i++gcfH1/l68AHnItFHnaCv6hn9dNh0hQvvQJsxpi4BMBFN5DLeHBuucT/0DgzXif/OyRw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-unicode-sets-regex": { + "version": "7.18.6", + "resolved": "https://registry.npmmirror.com/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", + "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-arrow-functions": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.24.1.tgz", + "integrity": "sha512-ngT/3NkRhsaep9ck9uj2Xhv9+xB1zShY3tM3g6om4xxCELwCDN4g4Aq5dRn48+0hasAql7s2hdBOysCfNpr4fw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-generator-functions": { + "version": "7.24.3", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.24.3.tgz", + "integrity": "sha512-Qe26CMYVjpQxJ8zxM1340JFNjZaF+ISWpr1Kt/jGo+ZTUzKkfw/pphEWbRCb+lmSM6k/TOgfYLvmbHkUQ0asIg==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-remap-async-to-generator": "^7.22.20", + "@babel/plugin-syntax-async-generators": "^7.8.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-to-generator": { + "version": "7.24.1", + 
"resolved": "https://registry.npmmirror.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.24.1.tgz", + "integrity": "sha512-AawPptitRXp1y0n4ilKcGbRYWfbbzFWz2NqNu7dacYDtFtz0CMjG64b3LQsb3KIgnf4/obcUL78hfaOS7iCUfw==", + "dependencies": { + "@babel/helper-module-imports": "^7.24.1", + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-remap-async-to-generator": "^7.22.20" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoped-functions": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.24.1.tgz", + "integrity": "sha512-TWWC18OShZutrv9C6mye1xwtam+uNi2bnTOCBUd5sZxyHOiWbU6ztSROofIMrK84uweEZC219POICK/sTYwfgg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoping": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.24.5.tgz", + "integrity": "sha512-sMfBc3OxghjC95BkYrYocHL3NaOplrcaunblzwXhGmlPwpmfsxr4vK+mBBt49r+S240vahmv+kUxkeKgs+haCw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-properties": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.24.1.tgz", + "integrity": "sha512-OMLCXi0NqvJfORTaPQBwqLXHhb93wkBKZ4aNwMl6WtehO7ar+cmp+89iPEQPqxAnxsOKTaMcs3POz3rKayJ72g==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.24.1", + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-static-block": { + "version": "7.24.4", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.24.4.tgz", + "integrity": "sha512-B8q7Pz870Hz/q9UgP8InNpY01CSLDSCyqX7zcRuv3FcPl87A2G17lASroHWaCtbdIcbYzOZ7kWmXFKbijMSmFg==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.24.4", + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/plugin-syntax-class-static-block": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0" + } + }, + "node_modules/@babel/plugin-transform-classes": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.24.5.tgz", + "integrity": "sha512-gWkLP25DFj2dwe9Ck8uwMOpko4YsqyfZJrOmqqcegeDYEbp7rmn4U6UQZNj08UF6MaX39XenSpKRCvpDRBtZ7Q==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-compilation-targets": "^7.23.6", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-plugin-utils": "^7.24.5", + "@babel/helper-replace-supers": "^7.24.1", + "@babel/helper-split-export-declaration": "^7.24.5", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@babel/plugin-transform-computed-properties": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.24.1.tgz", + "integrity": "sha512-5pJGVIUfJpOS+pAqBQd+QMaTD2vCL/HcePooON6pDpHgRp4gNRmzyHTPIkXntwKsq3ayUFVfJaIKPw2pOkOcTw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/template": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-destructuring": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.24.5.tgz", + "integrity": "sha512-SZuuLyfxvsm+Ah57I/i1HVjveBENYK9ue8MJ7qkc7ndoNjqquJiElzA7f5yaAXjyW2hKojosOTAQQRX50bPSVg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dotall-regex": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.24.1.tgz", + "integrity": "sha512-p7uUxgSoZwZ2lPNMzUkqCts3xlp8n+o05ikjy7gbtFJSt9gdU88jAmtfmOxHM14noQXBxfgzf2yRWECiNVhTCw==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.15", + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-duplicate-keys": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.24.1.tgz", + "integrity": "sha512-msyzuUnvsjsaSaocV6L7ErfNsa5nDWL1XKNnDePLgmz+WdU4w/J8+AxBMrWfi9m4IxfL5sZQKUPQKDQeeAT6lA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dynamic-import": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.24.1.tgz", + "integrity": "sha512-av2gdSTyXcJVdI+8aFZsCAtR29xJt0S5tas+Ef8NvBNmD1a+N/3ecMLeMBgfcK+xzsjdLDT6oHt+DFPyeqUbDA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/plugin-syntax-dynamic-import": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-exponentiation-operator": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.24.1.tgz", + "integrity": "sha512-U1yX13dVBSwS23DEAqU+Z/PkwE9/m7QQy8Y9/+Tdb8UWYaGNDYwTLi19wqIAiROr8sXVum9A/rtiH5H0boUcTw==", + "dependencies": { + "@babel/helper-builder-binary-assignment-operator-visitor": "^7.22.15", + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-export-namespace-from": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.24.1.tgz", + "integrity": 
"sha512-Ft38m/KFOyzKw2UaJFkWG9QnHPG/Q/2SkOrRk4pNBPg5IPZ+dOxcmkK5IyuBcxiNPyyYowPGUReyBvrvZs7IlQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-flow-strip-types": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-flow-strip-types/-/plugin-transform-flow-strip-types-7.24.1.tgz", + "integrity": "sha512-iIYPIWt3dUmUKKE10s3W+jsQ3icFkw0JyRVyY1B7G4yK/nngAOHLVx8xlhA6b/Jzl/Y0nis8gjqhqKtRDQqHWQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/plugin-syntax-flow": "^7.24.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-for-of": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.24.1.tgz", + "integrity": "sha512-OxBdcnF04bpdQdR3i4giHZNZQn7cm8RQKcSwA17wAAqEELo1ZOwp5FFgeptWUQXFyT9kwHo10aqqauYkRZPCAg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-function-name": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.24.1.tgz", + "integrity": "sha512-BXmDZpPlh7jwicKArQASrj8n22/w6iymRnvHYYd2zO30DbE277JO20/7yXJT3QxDPtiQiOxQBbZH4TpivNXIxA==", + "dependencies": { + "@babel/helper-compilation-targets": "^7.23.6", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-json-strings": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.24.1.tgz", + "integrity": "sha512-U7RMFmRvoasscrIFy5xA4gIp8iWnWubnKkKuUGJjsuOH7GfbMkB+XZzeslx2kLdEGdOJDamEmCqOks6e8nv8DQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/plugin-syntax-json-strings": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-literals": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.24.1.tgz", + "integrity": "sha512-zn9pwz8U7nCqOYIiBaOxoQOtYmMODXTJnkxG4AtX8fPmnCRYWBOHD0qcpwS9e2VDSp1zNJYpdnFMIKb8jmwu6g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-logical-assignment-operators": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.24.1.tgz", + "integrity": "sha512-OhN6J4Bpz+hIBqItTeWJujDOfNP+unqv/NJgyhlpSqgBTPm37KkMmZV6SYcOj+pnDbdcl1qRGV/ZiIjX9Iy34w==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" + }, + "engines": { + "node": ">=6.9.0" + }, 
+ "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-member-expression-literals": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.24.1.tgz", + "integrity": "sha512-4ojai0KysTWXzHseJKa1XPNXKRbuUrhkOPY4rEGeR+7ChlJVKxFa3H3Bz+7tWaGKgJAXUWKOGmltN+u9B3+CVg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-amd": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.24.1.tgz", + "integrity": "sha512-lAxNHi4HVtjnHd5Rxg3D5t99Xm6H7b04hUS7EHIXcUl2EV4yl1gWdqZrNzXnSrHveL9qMdbODlLF55mvgjAfaQ==", + "dependencies": { + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-commonjs": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.24.1.tgz", + "integrity": "sha512-szog8fFTUxBfw0b98gEWPaEqF42ZUD/T3bkynW/wtgx2p/XCP55WEsb+VosKceRSd6njipdZvNogqdtI4Q0chw==", + "dependencies": { + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-simple-access": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-systemjs": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.24.1.tgz", + "integrity": "sha512-mqQ3Zh9vFO1Tpmlt8QPnbwGHzNz3lpNEMxQb1kAemn/erstyqw1r9KeOlOfo3y6xAnFEcOv2tSyrXfmMk+/YZA==", + "dependencies": { + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-validator-identifier": "^7.22.20" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-umd": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.24.1.tgz", + "integrity": "sha512-tuA3lpPj+5ITfcCluy6nWonSL7RvaG0AOTeAuvXqEKS34lnLzXpDb0dcP6K8jD0zWZFNDVly90AGFJPnm4fOYg==", + "dependencies": { + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.22.5.tgz", + "integrity": "sha512-YgLLKmS3aUBhHaxp5hi1WJTgOUb/NCuDHzGT9z9WTt3YG+CPRhJs6nprbStx6DnWM4dh6gt7SU3sZodbZ08adQ==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-new-target": 
{ + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.24.1.tgz", + "integrity": "sha512-/rurytBM34hYy0HKZQyA0nHbQgQNFm4Q/BOc9Hflxi2X3twRof7NaE5W46j4kQitm7SvACVRXsa6N/tSZxvPug==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-nullish-coalescing-operator": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.24.1.tgz", + "integrity": "sha512-iQ+caew8wRrhCikO5DrUYx0mrmdhkaELgFa+7baMcVuhxIkN7oxt06CZ51D65ugIb1UWRQ8oQe+HXAVM6qHFjw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-numeric-separator": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.24.1.tgz", + "integrity": "sha512-7GAsGlK4cNL2OExJH1DzmDeKnRv/LXq0eLUSvudrehVA5Rgg4bIrqEUW29FbKMBRT0ztSqisv7kjP+XIC4ZMNw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/plugin-syntax-numeric-separator": "^7.10.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-rest-spread": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.24.5.tgz", + "integrity": "sha512-7EauQHszLGM3ay7a161tTQH7fj+3vVM/gThlz5HpFtnygTxjrlvoeq7MPVA1Vy9Q555OB8SnAOsMkLShNkkrHA==", + "dependencies": { + "@babel/helper-compilation-targets": "^7.23.6", + "@babel/helper-plugin-utils": "^7.24.5", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-transform-parameters": "^7.24.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-super": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.24.1.tgz", + "integrity": "sha512-oKJqR3TeI5hSLRxudMjFQ9re9fBVUU0GICqM3J1mi8MqlhVr6hC/ZN4ttAyMuQR6EZZIY6h/exe5swqGNNIkWQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-replace-supers": "^7.24.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-catch-binding": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.24.1.tgz", + "integrity": "sha512-oBTH7oURV4Y+3EUrf6cWn1OHio3qG/PVwO5J03iSJmBg6m2EhKjkAu/xuaXaYwWW9miYtvbWv4LNf0AmR43LUA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-chaining": { + "version": "7.24.5", + "resolved": 
"https://registry.npmmirror.com/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.24.5.tgz", + "integrity": "sha512-xWCkmwKT+ihmA6l7SSTpk8e4qQl/274iNbSKRRS8mpqFR32ksy36+a+LWY8OXCCEefF8WFlnOHVsaDI2231wBg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/plugin-syntax-optional-chaining": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-parameters": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.24.5.tgz", + "integrity": "sha512-9Co00MqZ2aoky+4j2jhofErthm6QVLKbpQrvz20c3CH9KQCLHyNB+t2ya4/UrRpQGR+Wrwjg9foopoeSdnHOkA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-methods": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.24.1.tgz", + "integrity": "sha512-tGvisebwBO5em4PaYNqt4fkw56K2VALsAbAakY0FjTYqJp7gfdrgr7YX76Or8/cpik0W6+tj3rZ0uHU9Oil4tw==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.24.1", + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-property-in-object": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.24.5.tgz", + "integrity": "sha512-JM4MHZqnWR04jPMujQDTBVRnqxpLLpx2tkn7iPn+Hmsc0Gnb79yvRWOkvqFOx3Z7P7VxiRIR22c4eGSNj87OBQ==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-create-class-features-plugin": "^7.24.5", + "@babel/helper-plugin-utils": "^7.24.5", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-property-literals": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.24.1.tgz", + "integrity": "sha512-LetvD7CrHmEx0G442gOomRr66d7q8HzzGGr4PMHGr+5YIm6++Yke+jxj246rpvsbyhJwCLxcTn6zW1P1BSenqA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-regenerator": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.24.1.tgz", + "integrity": "sha512-sJwZBCzIBE4t+5Q4IGLaaun5ExVMRY0lYwos/jNecjMrVCygCdph3IKv0tkP5Fc87e/1+bebAmEAGBfnRD+cnw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0", + "regenerator-transform": "^0.15.2" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-reserved-words": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.24.1.tgz", + "integrity": 
"sha512-JAclqStUfIwKN15HrsQADFgeZt+wexNQ0uLhuqvqAUFoqPMjEcFCYZBhq0LUdz6dZK/mD+rErhW71fbx8RYElg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-shorthand-properties": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.24.1.tgz", + "integrity": "sha512-LyjVB1nsJ6gTTUKRjRWx9C1s9hE7dLfP/knKdrfeH9UPtAGjYGgxIbFfx7xyLIEWs7Xe1Gnf8EWiUqfjLhInZA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-spread": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.24.1.tgz", + "integrity": "sha512-KjmcIM+fxgY+KxPVbjelJC6hrH1CgtPmTvdXAfn3/a9CnWGSTY7nH4zm5+cjmWJybdcPSsD0++QssDsjcpe47g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-sticky-regex": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.24.1.tgz", + "integrity": "sha512-9v0f1bRXgPVcPrngOQvLXeGNNVLc8UjMVfebo9ka0WF3/7+aVUHmaJVT3sa0XCzEFioPfPHZiOcYG9qOsH63cw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-template-literals": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.24.1.tgz", + "integrity": "sha512-WRkhROsNzriarqECASCNu/nojeXCDTE/F2HmRgOzi7NGvyfYGq1NEjKBK3ckLfRgGc6/lPAqP0vDOSw3YtG34g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typeof-symbol": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.24.5.tgz", + "integrity": "sha512-UTGnhYVZtTAjdwOTzT+sCyXmTn8AhaxOS/MjG9REclZ6ULHWF9KoCZur0HSGU7hk8PdBFKKbYe6+gqdXWz84Jg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typescript": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.24.5.tgz", + "integrity": "sha512-E0VWu/hk83BIFUWnsKZ4D81KXjN5L3MobvevOHErASk9IPwKHOkTgvqzvNo1yP/ePJWqqK2SpUR5z+KQbl6NVw==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-create-class-features-plugin": "^7.24.5", + "@babel/helper-plugin-utils": "^7.24.5", + "@babel/plugin-syntax-typescript": "^7.24.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-escapes": { + "version": "7.24.1", + "resolved": 
"https://registry.npmmirror.com/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.24.1.tgz", + "integrity": "sha512-RlkVIcWT4TLI96zM660S877E7beKlQw7Ig+wqkKBiWfj0zH5Q4h50q6er4wzZKRNSYpfo6ILJ+hrJAGSX2qcNw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-property-regex": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.24.1.tgz", + "integrity": "sha512-Ss4VvlfYV5huWApFsF8/Sq0oXnGO+jB+rijFEFugTd3cwSObUSnUi88djgR5528Csl0uKlrI331kRqe56Ov2Ng==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.15", + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-regex": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.24.1.tgz", + "integrity": "sha512-2A/94wgZgxfTsiLaQ2E36XAOdcZmGAaEEgVmxQWwZXWkGhvoHbaqXcKnU8zny4ycpu3vNqg0L/PcCiYtHtA13g==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.15", + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-sets-regex": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.24.1.tgz", + "integrity": "sha512-fqj4WuzzS+ukpgerpAoOnMfQXwUHFxXUZUE84oL2Kao2N8uSlvcpnAidKASgsNgzZHBsHWvcm8s9FPWUhAb8fA==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.15", + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/preset-env": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/preset-env/-/preset-env-7.24.5.tgz", + "integrity": "sha512-UGK2ifKtcC8i5AI4cH+sbLLuLc2ktYSFJgBAXorKAsHUZmrQ1q6aQ6i3BvU24wWs2AAKqQB6kq3N9V9Gw1HiMQ==", + "dependencies": { + "@babel/compat-data": "^7.24.4", + "@babel/helper-compilation-targets": "^7.23.6", + "@babel/helper-plugin-utils": "^7.24.5", + "@babel/helper-validator-option": "^7.23.5", + "@babel/plugin-bugfix-firefox-class-in-computed-class-key": "^7.24.5", + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.24.1", + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.24.1", + "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": "^7.24.1", + "@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3", + "@babel/plugin-syntax-import-assertions": "^7.24.1", + "@babel/plugin-syntax-import-attributes": "^7.24.1", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + 
"@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5", + "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", + "@babel/plugin-transform-arrow-functions": "^7.24.1", + "@babel/plugin-transform-async-generator-functions": "^7.24.3", + "@babel/plugin-transform-async-to-generator": "^7.24.1", + "@babel/plugin-transform-block-scoped-functions": "^7.24.1", + "@babel/plugin-transform-block-scoping": "^7.24.5", + "@babel/plugin-transform-class-properties": "^7.24.1", + "@babel/plugin-transform-class-static-block": "^7.24.4", + "@babel/plugin-transform-classes": "^7.24.5", + "@babel/plugin-transform-computed-properties": "^7.24.1", + "@babel/plugin-transform-destructuring": "^7.24.5", + "@babel/plugin-transform-dotall-regex": "^7.24.1", + "@babel/plugin-transform-duplicate-keys": "^7.24.1", + "@babel/plugin-transform-dynamic-import": "^7.24.1", + "@babel/plugin-transform-exponentiation-operator": "^7.24.1", + "@babel/plugin-transform-export-namespace-from": "^7.24.1", + "@babel/plugin-transform-for-of": "^7.24.1", + "@babel/plugin-transform-function-name": "^7.24.1", + "@babel/plugin-transform-json-strings": "^7.24.1", + "@babel/plugin-transform-literals": "^7.24.1", + "@babel/plugin-transform-logical-assignment-operators": "^7.24.1", + "@babel/plugin-transform-member-expression-literals": "^7.24.1", + "@babel/plugin-transform-modules-amd": "^7.24.1", + "@babel/plugin-transform-modules-commonjs": "^7.24.1", + "@babel/plugin-transform-modules-systemjs": "^7.24.1", + "@babel/plugin-transform-modules-umd": "^7.24.1", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.22.5", + "@babel/plugin-transform-new-target": "^7.24.1", + "@babel/plugin-transform-nullish-coalescing-operator": "^7.24.1", + "@babel/plugin-transform-numeric-separator": "^7.24.1", + "@babel/plugin-transform-object-rest-spread": "^7.24.5", + "@babel/plugin-transform-object-super": "^7.24.1", + "@babel/plugin-transform-optional-catch-binding": "^7.24.1", + "@babel/plugin-transform-optional-chaining": "^7.24.5", + "@babel/plugin-transform-parameters": "^7.24.5", + "@babel/plugin-transform-private-methods": "^7.24.1", + "@babel/plugin-transform-private-property-in-object": "^7.24.5", + "@babel/plugin-transform-property-literals": "^7.24.1", + "@babel/plugin-transform-regenerator": "^7.24.1", + "@babel/plugin-transform-reserved-words": "^7.24.1", + "@babel/plugin-transform-shorthand-properties": "^7.24.1", + "@babel/plugin-transform-spread": "^7.24.1", + "@babel/plugin-transform-sticky-regex": "^7.24.1", + "@babel/plugin-transform-template-literals": "^7.24.1", + "@babel/plugin-transform-typeof-symbol": "^7.24.5", + "@babel/plugin-transform-unicode-escapes": "^7.24.1", + "@babel/plugin-transform-unicode-property-regex": "^7.24.1", + "@babel/plugin-transform-unicode-regex": "^7.24.1", + "@babel/plugin-transform-unicode-sets-regex": "^7.24.1", + "@babel/preset-modules": "0.1.6-no-external-plugins", + "babel-plugin-polyfill-corejs2": "^0.4.10", + "babel-plugin-polyfill-corejs3": "^0.10.4", + "babel-plugin-polyfill-regenerator": "^0.6.1", + "core-js-compat": "^3.31.0", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + 
}, + "node_modules/@babel/preset-flow": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/preset-flow/-/preset-flow-7.24.1.tgz", + "integrity": "sha512-sWCV2G9pcqZf+JHyv/RyqEIpFypxdCSxWIxQjpdaQxenNog7cN1pr76hg8u0Fz8Qgg0H4ETkGcJnXL8d4j0PPA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-validator-option": "^7.23.5", + "@babel/plugin-transform-flow-strip-types": "^7.24.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-modules": { + "version": "0.1.6-no-external-plugins", + "resolved": "https://registry.npmmirror.com/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz", + "integrity": "sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@babel/types": "^7.4.4", + "esutils": "^2.0.2" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/preset-typescript": { + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/@babel/preset-typescript/-/preset-typescript-7.24.1.tgz", + "integrity": "sha512-1DBaMmRDpuYQBPWD8Pf/WEwCrtgRHxsZnP4mIy9G/X+hFfbI47Q2G4t1Paakld84+qsk2fSsUPMKg71jkoOOaQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-validator-option": "^7.23.5", + "@babel/plugin-syntax-jsx": "^7.24.1", + "@babel/plugin-transform-modules-commonjs": "^7.24.1", + "@babel/plugin-transform-typescript": "^7.24.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/register": { + "version": "7.23.7", + "resolved": "https://registry.npmmirror.com/@babel/register/-/register-7.23.7.tgz", + "integrity": "sha512-EjJeB6+kvpk+Y5DAkEAmbOBEFkh9OASx0huoEkqYTFxAZHzOAX2Oh5uwAUuL2rUddqfM0SA+KPXV2TbzoZ2kvQ==", + "dependencies": { + "clone-deep": "^4.0.1", + "find-cache-dir": "^2.0.0", + "make-dir": "^2.1.0", + "pirates": "^4.0.6", + "source-map-support": "^0.5.16" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/register/node_modules/find-cache-dir": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/find-cache-dir/-/find-cache-dir-2.1.0.tgz", + "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", + "dependencies": { + "commondir": "^1.0.1", + "make-dir": "^2.0.0", + "pkg-dir": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/@babel/register/node_modules/find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dependencies": { + "locate-path": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/@babel/register/node_modules/locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dependencies": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/@babel/register/node_modules/make-dir": { + "version": "2.1.0", + "resolved": 
"https://registry.npmmirror.com/make-dir/-/make-dir-2.1.0.tgz", + "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", + "dependencies": { + "pify": "^4.0.1", + "semver": "^5.6.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/@babel/register/node_modules/p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dependencies": { + "p-limit": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/@babel/register/node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/register/node_modules/pify": { + "version": "4.0.1", + "resolved": "https://registry.npmmirror.com/pify/-/pify-4.0.1.tgz", + "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", + "engines": { + "node": ">=6" + } + }, + "node_modules/@babel/register/node_modules/pkg-dir": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/pkg-dir/-/pkg-dir-3.0.0.tgz", + "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", + "dependencies": { + "find-up": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/@babel/register/node_modules/semver": { + "version": "5.7.2", + "resolved": "https://registry.npmmirror.com/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/@babel/regjsgen": { + "version": "0.8.0", + "resolved": "https://registry.npmmirror.com/@babel/regjsgen/-/regjsgen-0.8.0.tgz", + "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==" + }, + "node_modules/@babel/runtime": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/runtime/-/runtime-7.24.5.tgz", + "integrity": "sha512-Nms86NXrsaeU9vbBJKni6gXiEXZ4CVpYVzEjDH9Sb8vmZ3UljyA1GSOJl/6LGPO8EHLuSF9H+IxNXHPX8QHJ4g==", + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.24.0", + "resolved": "https://registry.npmmirror.com/@babel/template/-/template-7.24.0.tgz", + "integrity": "sha512-Bkf2q8lMB0AFpX0NFEqSbx1OkTHf0f+0j82mkw+ZpzBnkk7e9Ql0891vlfgi+kHwOk8tQjiQHpqh4LaSa0fKEA==", + "dependencies": { + "@babel/code-frame": "^7.23.5", + "@babel/parser": "^7.24.0", + "@babel/types": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/traverse/-/traverse-7.24.5.tgz", + "integrity": "sha512-7aaBLeDQ4zYcUFDUD41lJc1fG8+5IU9DaNSJAgal866FGvmD5EbWQgnEC6kO1gGLsX0esNkfnJSndbTXA3r7UA==", + "dependencies": { + "@babel/code-frame": "^7.24.2", + "@babel/generator": "^7.24.5", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.24.5", + "@babel/parser": "^7.24.5", + 
"@babel/types": "^7.24.5", + "debug": "^4.3.1", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.24.5", + "resolved": "https://registry.npmmirror.com/@babel/types/-/types-7.24.5.tgz", + "integrity": "sha512-6mQNsaLeXTw0nxYUYu+NSa4Hx4BlF1x1x8/PMFbiR+GBSr+2DkECc69b8hgy2frEodNcvPffeH8YfWd3LI6jhQ==", + "dependencies": { + "@babel/helper-string-parser": "^7.24.1", + "@babel/helper-validator-identifier": "^7.24.5", + "to-fast-properties": "^2.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@bcoe/v8-coverage": { + "version": "0.2.3", + "resolved": "https://registry.npmmirror.com/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", + "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", + "dev": true + }, + "node_modules/@better-scroll/core": { + "version": "2.5.1", + "resolved": "https://registry.npmmirror.com/@better-scroll/core/-/core-2.5.1.tgz", + "integrity": "sha512-koKOuYA55dQ04FJRIVUpMGDr1hbCfWmfX0MGp1hKagkQSWSRpwblqACiwtggVauoj9aaJRJZ9hDsTM4weaavlg==", + "dependencies": { + "@better-scroll/shared-utils": "^2.5.1" + } + }, + "node_modules/@better-scroll/indicators": { + "version": "2.5.1", + "resolved": "https://registry.npmmirror.com/@better-scroll/indicators/-/indicators-2.5.1.tgz", + "integrity": "sha512-Hk+Y00pR6fTsu6C9HGg1yYZtsu1gAcTgcs4C9aM5h6fQANX/T2YIYrOSjZmdL+js2PTcXJWZS8VM4Xjoi1PbfQ==", + "dependencies": { + "@better-scroll/core": "^2.5.1" + } + }, + "node_modules/@better-scroll/infinity": { + "version": "2.5.1", + "resolved": "https://registry.npmmirror.com/@better-scroll/infinity/-/infinity-2.5.1.tgz", + "integrity": "sha512-GKHrrasIh0KlGzhASHDo5hEEBJcDFpP4XaZGPH9Ey8+QBH6/O1ykAXS2ixkVAOTkBrv+KgFXoCUr4oN1xWeM+g==", + "dependencies": { + "@better-scroll/core": "^2.5.1" + } + }, + "node_modules/@better-scroll/mouse-wheel": { + "version": "2.5.1", + "resolved": "https://registry.npmmirror.com/@better-scroll/mouse-wheel/-/mouse-wheel-2.5.1.tgz", + "integrity": "sha512-DGnrirRMY6zMM7xwgx09D/cA9A//3J1/uDkq8iBVEyE5p0sEr/keQpjEfFHGkBRa505BnbBwdbN6f5lugEDSPw==", + "dependencies": { + "@better-scroll/core": "^2.5.1" + } + }, + "node_modules/@better-scroll/movable": { + "version": "2.5.1", + "resolved": "https://registry.npmmirror.com/@better-scroll/movable/-/movable-2.5.1.tgz", + "integrity": "sha512-8bLPRY15bbK4K5+tjrtdaKsFFKmJx72wRdg+xz3xQGFcTD940HFkJiORSOcz8Ufue7eOJfcmreQJBw6XY+TqTw==", + "dependencies": { + "@better-scroll/core": "^2.5.1" + } + }, + "node_modules/@better-scroll/nested-scroll": { + "version": "2.5.1", + "resolved": "https://registry.npmmirror.com/@better-scroll/nested-scroll/-/nested-scroll-2.5.1.tgz", + "integrity": "sha512-3cRsARxf9tq1VWBq7YAaET0xGAmgY1ERMmnXDo2gHFrmsJoNOionlpAeHdZvKQp2jG7JrzJ1O27nGCXf40gnkw==", + "dependencies": { + "@better-scroll/core": "^2.5.1" + } + }, + "node_modules/@better-scroll/observe-dom": { + "version": "2.5.1", + "resolved": "https://registry.npmmirror.com/@better-scroll/observe-dom/-/observe-dom-2.5.1.tgz", + "integrity": "sha512-TCMGFLRfpXBPIwtUV/efliUmfmrhSNI7NXdSyjdWjsLOS7dh3eFkmcom5ERVWMaXVELSmujGXLqobT+dT0C/jg==", + "dependencies": { + "@better-scroll/core": "^2.5.1" + } + }, + "node_modules/@better-scroll/observe-image": { + "version": "2.5.1", + "resolved": "https://registry.npmmirror.com/@better-scroll/observe-image/-/observe-image-2.5.1.tgz", + "integrity": "sha512-0Lhfj83o8EESwOxr8bfStCzNOokTm3KB7JeyMS8u/xl+3tyTuls9889cyAukYk4Yly1cS49pCGfj2P8YOiwtUg==", + "dependencies": { + 
"@better-scroll/core": "^2.5.1" + } + }, + "node_modules/@better-scroll/pull-down": { + "version": "2.5.1", + "resolved": "https://registry.npmmirror.com/@better-scroll/pull-down/-/pull-down-2.5.1.tgz", + "integrity": "sha512-Y6XcGu2NlevPg3k9VBRRFvpmfoTA+rO96JGdog2qKHclIPNXnsVwsIHtZfAm9weE/f9UuC4BnB+VUFRlucfupg==", + "dependencies": { + "@better-scroll/core": "^2.5.1" + } + }, + "node_modules/@better-scroll/pull-up": { + "version": "2.5.1", + "resolved": "https://registry.npmmirror.com/@better-scroll/pull-up/-/pull-up-2.5.1.tgz", + "integrity": "sha512-1hu3xSMxdB8T391KffpNZ7g93lMwZEHjfb1F1Y4KvIkciDt8nXqkGpqrZF+YwR+EJTgYcWqUO8kgmI6XXu7Pkg==", + "dependencies": { + "@better-scroll/core": "^2.5.1" + } + }, + "node_modules/@better-scroll/scroll-bar": { + "version": "2.5.1", + "resolved": "https://registry.npmmirror.com/@better-scroll/scroll-bar/-/scroll-bar-2.5.1.tgz", + "integrity": "sha512-i6r60pWG/ztkFK2j5Gj54I0LJb2jGh5TWJNQBoW0gUkp28B+0JvBFTwZn9tF7beZCBorKR7Hvvu4O9A1TJy94Q==", + "dependencies": { + "@better-scroll/core": "^2.5.1" + } + }, + "node_modules/@better-scroll/shared-utils": { + "version": "2.5.1", + "resolved": "https://registry.npmmirror.com/@better-scroll/shared-utils/-/shared-utils-2.5.1.tgz", + "integrity": "sha512-AplkfSjXVYP9LZiD6JsKgmgQJ/mG4uuLmBuwLz8W5OsYc7AYTfN8kw6GqZ5OwCGoXkVhBGyd8NeC4xwYItp0aw==" + }, + "node_modules/@better-scroll/slide": { + "version": "2.5.1", + "resolved": "https://registry.npmmirror.com/@better-scroll/slide/-/slide-2.5.1.tgz", + "integrity": "sha512-aDOrfsmjAcz6DXN7mDX3tPieAn195R43Yn9e3waI19TIEok/mQlI1a/kb5quqWOoxkiaZQ8xe3vx5ZTj9C+F6Q==", + "dependencies": { + "@better-scroll/core": "^2.5.1" + } + }, + "node_modules/@better-scroll/wheel": { + "version": "2.5.1", + "resolved": "https://registry.npmmirror.com/@better-scroll/wheel/-/wheel-2.5.1.tgz", + "integrity": "sha512-fYLcEvkh88Z/2L+P5/+SGMunuc+HzAjGOiORIa/x21qb/knO2RFH4A/V1Rt3OIW4QluWzuFnU6jJRPlsQVZ4fg==", + "dependencies": { + "@better-scroll/core": "^2.5.1" + } + }, + "node_modules/@better-scroll/zoom": { + "version": "2.5.1", + "resolved": "https://registry.npmmirror.com/@better-scroll/zoom/-/zoom-2.5.1.tgz", + "integrity": "sha512-aGvFY5ooeZWS4RcxQLD+pGLpQHQxpPy0sMZV3yadcd2QK53PK9gS4Dp+BYfRv8lZ4/P2LoNEhr6Wq1DN6+uPlA==", + "dependencies": { + "@better-scroll/core": "^2.5.1" + } + }, + "node_modules/@ctrl/tinycolor": { + "version": "3.6.1", + "resolved": "https://registry.npmmirror.com/@ctrl/tinycolor/-/tinycolor-3.6.1.tgz", + "integrity": "sha512-SITSV6aIXsuVNV3f3O0f2n/cgyEDWoSqtZMYiAmcsYHydcKrOz3gUxB/iXd/Qf08+IZX4KpgNbvUdMBmWz+kcA==", + "engines": { + "node": ">=10" + } + }, + "node_modules/@discoveryjs/json-ext": { + "version": "0.5.7", + "resolved": "https://registry.npmmirror.com/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz", + "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/@element-plus/icons-vue": { + "version": "2.3.1", + "resolved": "https://registry.npmmirror.com/@element-plus/icons-vue/-/icons-vue-2.3.1.tgz", + "integrity": "sha512-XxVUZv48RZAd87ucGS48jPf6pKu0yV5UCg9f4FFwtrYxXOwWuVJo6wOvSLKEoMQKjv8GsX/mhP6UsC1lRwbUWg==", + "peerDependencies": { + "vue": "^3.2.0" + } + }, + "node_modules/@emotion/hash": { + "version": "0.9.1", + "resolved": "https://registry.npmmirror.com/@emotion/hash/-/hash-0.9.1.tgz", + "integrity": "sha512-gJB6HLm5rYwSLI6PQa+X1t5CFGrv1J1TWG+sOyMCeKz2ojaj6Fnl/rZEspogG+cvqbt4AE/2eIyD2QfLKTBNlQ==" + }, + "node_modules/@emotion/unitless": { + 
"version": "0.8.1", + "resolved": "https://registry.npmmirror.com/@emotion/unitless/-/unitless-0.8.1.tgz", + "integrity": "sha512-KOEGMu6dmJZtpadb476IsZBclKvILjopjUii3V+7MnXIQCYh8W3NgNcgwo21n9LXZX6EDIKvqfjYxXebDwxKmQ==" + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.4.0", + "resolved": "https://registry.npmmirror.com/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", + "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==", + "dev": true, + "dependencies": { + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.10.0", + "resolved": "https://registry.npmmirror.com/@eslint-community/regexpp/-/regexpp-4.10.0.tgz", + "integrity": "sha512-Cu96Sd2By9mCNTx2iyKOmq10v22jUVQv0lQnlGNy16oE9589yE+QADPbrMGCkA51cKZSg3Pu/aTJVTGfL/qjUA==", + "dev": true, + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "0.4.3", + "resolved": "https://registry.npmmirror.com/@eslint/eslintrc/-/eslintrc-0.4.3.tgz", + "integrity": "sha512-J6KFFz5QCYUJq3pf0mjEcCJVERbzv71PUIDczuh9JkwGEzced6CO5ADLHB1rbf/+oPBtoPfMYNOpGDzCANlbXw==", + "dev": true, + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.1.1", + "espree": "^7.3.0", + "globals": "^13.9.0", + "ignore": "^4.0.6", + "import-fresh": "^3.2.1", + "js-yaml": "^3.13.1", + "minimatch": "^3.0.4", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/@eslint/eslintrc/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmmirror.com/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/@eslint/eslintrc/node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmmirror.com/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@eslint/eslintrc/node_modules/ignore": { + "version": "4.0.6", + "resolved": "https://registry.npmmirror.com/ignore/-/ignore-4.0.6.tgz", + "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/@eslint/eslintrc/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmmirror.com/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dev": true, + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/@floating-ui/core": { + "version": "1.6.2", + "resolved": "https://registry.npmmirror.com/@floating-ui/core/-/core-1.6.2.tgz", + "integrity": "sha512-+2XpQV9LLZeanU4ZevzRnGFg2neDeKHgFLjP6YLW+tly0IvrhqT4u8enLGjLH3qeh85g19xY5rsAusfwTdn5lg==", + "dependencies": { + "@floating-ui/utils": "^0.2.0" + } + }, + "node_modules/@floating-ui/dom": { + 
"version": "1.6.5", + "resolved": "https://registry.npmmirror.com/@floating-ui/dom/-/dom-1.6.5.tgz", + "integrity": "sha512-Nsdud2X65Dz+1RHjAIP0t8z5e2ff/IRbei6BqFrl1urT8sDVzM1HMQ+R0XcU5ceRfyO3I6ayeqIfh+6Wb8LGTw==", + "dependencies": { + "@floating-ui/core": "^1.0.0", + "@floating-ui/utils": "^0.2.0" + } + }, + "node_modules/@floating-ui/utils": { + "version": "0.2.2", + "resolved": "https://registry.npmmirror.com/@floating-ui/utils/-/utils-0.2.2.tgz", + "integrity": "sha512-J4yDIIthosAsRZ5CPYP/jQvUAQtlZTTD/4suA08/FEnlxqW3sKS9iAhgsa9VYLZ6vDHn/ixJgIqRQPotoBjxIw==" + }, + "node_modules/@graphql-tools/merge": { + "version": "8.3.1", + "resolved": "https://registry.npmmirror.com/@graphql-tools/merge/-/merge-8.3.1.tgz", + "integrity": "sha512-BMm99mqdNZbEYeTPK3it9r9S6rsZsQKtlqJsSBknAclXq2pGEfOxjcIZi+kBSkHZKPKCRrYDd5vY0+rUmIHVLg==", + "dependencies": { + "@graphql-tools/utils": "8.9.0", + "tslib": "^2.4.0" + }, + "peerDependencies": { + "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" + } + }, + "node_modules/@graphql-tools/mock": { + "version": "8.7.20", + "resolved": "https://registry.npmmirror.com/@graphql-tools/mock/-/mock-8.7.20.tgz", + "integrity": "sha512-ljcHSJWjC/ZyzpXd5cfNhPI7YljRVvabKHPzKjEs5ElxWu2cdlLGvyNYepApXDsM/OJG/2xuhGM+9GWu5gEAPQ==", + "dependencies": { + "@graphql-tools/schema": "^9.0.18", + "@graphql-tools/utils": "^9.2.1", + "fast-json-stable-stringify": "^2.1.0", + "tslib": "^2.4.0" + }, + "peerDependencies": { + "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" + } + }, + "node_modules/@graphql-tools/mock/node_modules/@graphql-tools/merge": { + "version": "8.4.2", + "resolved": "https://registry.npmmirror.com/@graphql-tools/merge/-/merge-8.4.2.tgz", + "integrity": "sha512-XbrHAaj8yDuINph+sAfuq3QCZ/tKblrTLOpirK0+CAgNlZUCHs0Fa+xtMUURgwCVThLle1AF7svJCxFizygLsw==", + "dependencies": { + "@graphql-tools/utils": "^9.2.1", + "tslib": "^2.4.0" + }, + "peerDependencies": { + "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" + } + }, + "node_modules/@graphql-tools/mock/node_modules/@graphql-tools/schema": { + "version": "9.0.19", + "resolved": "https://registry.npmmirror.com/@graphql-tools/schema/-/schema-9.0.19.tgz", + "integrity": "sha512-oBRPoNBtCkk0zbUsyP4GaIzCt8C0aCI4ycIRUL67KK5pOHljKLBBtGT+Jr6hkzA74C8Gco8bpZPe7aWFjiaK2w==", + "dependencies": { + "@graphql-tools/merge": "^8.4.1", + "@graphql-tools/utils": "^9.2.1", + "tslib": "^2.4.0", + "value-or-promise": "^1.0.12" + }, + "peerDependencies": { + "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" + } + }, + "node_modules/@graphql-tools/mock/node_modules/@graphql-tools/utils": { + "version": "9.2.1", + "resolved": "https://registry.npmmirror.com/@graphql-tools/utils/-/utils-9.2.1.tgz", + "integrity": "sha512-WUw506Ql6xzmOORlriNrD6Ugx+HjVgYxt9KCXD9mHAak+eaXSwuGGPyE60hy9xaDEoXKBsG7SkG69ybitaVl6A==", + "dependencies": { + "@graphql-typed-document-node/core": "^3.1.1", + "tslib": "^2.4.0" + }, + "peerDependencies": { + "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" + } + }, + "node_modules/@graphql-tools/mock/node_modules/value-or-promise": { + "version": "1.0.12", + "resolved": "https://registry.npmmirror.com/value-or-promise/-/value-or-promise-1.0.12.tgz", + "integrity": "sha512-Z6Uz+TYwEqE7ZN50gwn+1LCVo9ZVrpxRPOhOLnncYkY1ZzOYtrX8Fwf/rFktZ8R5mJms6EZf5TqNOMeZmnPq9Q==", + "engines": { + "node": ">=12" + } + }, + "node_modules/@graphql-tools/schema": { + "version": "8.5.1", + "resolved": "https://registry.npmmirror.com/@graphql-tools/schema/-/schema-8.5.1.tgz", + "integrity": 
"sha512-0Esilsh0P/qYcB5DKQpiKeQs/jevzIadNTaT0jeWklPMwNbT7yMX4EqZany7mbeRRlSRwMzNzL5olyFdffHBZg==", + "dependencies": { + "@graphql-tools/merge": "8.3.1", + "@graphql-tools/utils": "8.9.0", + "tslib": "^2.4.0", + "value-or-promise": "1.0.11" + }, + "peerDependencies": { + "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" + } + }, + "node_modules/@graphql-tools/utils": { + "version": "8.9.0", + "resolved": "https://registry.npmmirror.com/@graphql-tools/utils/-/utils-8.9.0.tgz", + "integrity": "sha512-pjJIWH0XOVnYGXCqej8g/u/tsfV4LvLlj0eATKQu5zwnxd/TiTHq7Cg313qUPTFFHZ3PP5wJ15chYVtLDwaymg==", + "dependencies": { + "tslib": "^2.4.0" + }, + "peerDependencies": { + "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" + } + }, + "node_modules/@graphql-typed-document-node/core": { + "version": "3.2.0", + "resolved": "https://registry.npmmirror.com/@graphql-typed-document-node/core/-/core-3.2.0.tgz", + "integrity": "sha512-mB9oAsNCm9aM3/SOv4YtBMqZbYj10R7dkq8byBqxGY/ncFwhf2oQzMV+LCRlWoDSEBJ3COiR1yeDvMtsoOsuFQ==", + "peerDependencies": { + "graphql": "^0.8.0 || ^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" + } + }, + "node_modules/@hapi/hoek": { + "version": "9.3.0", + "resolved": "https://registry.npmmirror.com/@hapi/hoek/-/hoek-9.3.0.tgz", + "integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==" + }, + "node_modules/@hapi/topo": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/@hapi/topo/-/topo-5.1.0.tgz", + "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.5.0", + "resolved": "https://registry.npmmirror.com/@humanwhocodes/config-array/-/config-array-0.5.0.tgz", + "integrity": "sha512-FagtKFz74XrTl7y6HCzQpwDfXP0yhxe9lHLD1UZxjvZIcbyRz8zTFF/yYNfSfzU414eDwZ1SrO0Qvtyf+wFMQg==", + "dev": true, + "dependencies": { + "@humanwhocodes/object-schema": "^1.2.0", + "debug": "^4.1.1", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/object-schema": { + "version": "1.2.1", + "resolved": "https://registry.npmmirror.com/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz", + "integrity": "sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==", + "dev": true + }, + "node_modules/@intlify/core-base": { + "version": "9.13.1", + "resolved": "https://registry.npmmirror.com/@intlify/core-base/-/core-base-9.13.1.tgz", + "integrity": "sha512-+bcQRkJO9pcX8d0gel9ZNfrzU22sZFSA0WVhfXrf5jdJOS24a+Bp8pozuS9sBI9Hk/tGz83pgKfmqcn/Ci7/8w==", + "dependencies": { + "@intlify/message-compiler": "9.13.1", + "@intlify/shared": "9.13.1" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + } + }, + "node_modules/@intlify/message-compiler": { + "version": "9.13.1", + "resolved": "https://registry.npmmirror.com/@intlify/message-compiler/-/message-compiler-9.13.1.tgz", + "integrity": "sha512-SKsVa4ajYGBVm7sHMXd5qX70O2XXjm55zdZB3VeMFCvQyvLew/dLvq3MqnaIsTMF1VkkOb9Ttr6tHcMlyPDL9w==", + "dependencies": { + "@intlify/shared": "9.13.1", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + } + }, + "node_modules/@intlify/shared": { + "version": "9.13.1", + "resolved": 
"https://registry.npmmirror.com/@intlify/shared/-/shared-9.13.1.tgz", + "integrity": "sha512-u3b6BKGhE6j/JeRU6C/RL2FgyJfy6LakbtfeVF8fJXURpZZTzfh3e05J0bu0XPw447Q6/WUp3C4ajv4TMS4YsQ==", + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + } + }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmmirror.com/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "dev": true, + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmmirror.com/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "dev": true + }, + "node_modules/@isaacs/cliui/node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmmirror.com/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dev": true, + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@isaacs/cliui/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmmirror.com/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dev": true, + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/@istanbuljs/load-nyc-config": { + "version": "1.1.0", + "resolved": 
"https://registry.npmmirror.com/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", + "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", + "dev": true, + "dependencies": { + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmmirror.com/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmmirror.com/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dev": true, + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmmirror.com/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/console": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/@jest/console/-/console-27.5.1.tgz", + "integrity": "sha512-kZ/tNpS3NXn0mlXXXPNuDZnb4c0oZ20r4K5eemM2k30ZC3G0T02nXUvyhf5YdbXWHPEJLc9qGLxEZ216MdL+Zg==", + "dev": true, + "dependencies": { + "@jest/types": "^27.5.1", + "@types/node": "*", + "chalk": "^4.0.0", + "jest-message-util": "^27.5.1", + "jest-util": "^27.5.1", + "slash": "^3.0.0" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/@jest/console/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@jest/console/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/@jest/console/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/@jest/console/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": 
"sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/@jest/console/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/console/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/core": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/@jest/core/-/core-27.5.1.tgz", + "integrity": "sha512-AK6/UTrvQD0Cd24NSqmIA6rKsu0tKIxfiCducZvqxYdmMisOYAsdItspT+fQDQYARPf8XgjAFZi0ogW2agH5nQ==", + "dev": true, + "dependencies": { + "@jest/console": "^27.5.1", + "@jest/reporters": "^27.5.1", + "@jest/test-result": "^27.5.1", + "@jest/transform": "^27.5.1", + "@jest/types": "^27.5.1", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "emittery": "^0.8.1", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-changed-files": "^27.5.1", + "jest-config": "^27.5.1", + "jest-haste-map": "^27.5.1", + "jest-message-util": "^27.5.1", + "jest-regex-util": "^27.5.1", + "jest-resolve": "^27.5.1", + "jest-resolve-dependencies": "^27.5.1", + "jest-runner": "^27.5.1", + "jest-runtime": "^27.5.1", + "jest-snapshot": "^27.5.1", + "jest-util": "^27.5.1", + "jest-validate": "^27.5.1", + "jest-watcher": "^27.5.1", + "micromatch": "^4.0.4", + "rimraf": "^3.0.0", + "slash": "^3.0.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/core/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@jest/core/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/@jest/core/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/@jest/core/node_modules/color-name": { + "version": "1.1.4", + 
"resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/@jest/core/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/core/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/environment": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/@jest/environment/-/environment-27.5.1.tgz", + "integrity": "sha512-/WQjhPJe3/ghaol/4Bq480JKXV/Rfw8nQdN7f41fM8VDHLcxKXou6QyXAh3EFr9/bVG3x74z1NWDkP87EiY8gA==", + "dev": true, + "dependencies": { + "@jest/fake-timers": "^27.5.1", + "@jest/types": "^27.5.1", + "@types/node": "*", + "jest-mock": "^27.5.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/@jest/fake-timers": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/@jest/fake-timers/-/fake-timers-27.5.1.tgz", + "integrity": "sha512-/aPowoolwa07k7/oM3aASneNeBGCmGQsc3ugN4u6s4C/+s5M64MFo/+djTdiwcbQlRfFElGuDXWzaWj6QgKObQ==", + "dev": true, + "dependencies": { + "@jest/types": "^27.5.1", + "@sinonjs/fake-timers": "^8.0.1", + "@types/node": "*", + "jest-message-util": "^27.5.1", + "jest-mock": "^27.5.1", + "jest-util": "^27.5.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/@jest/globals": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/@jest/globals/-/globals-27.5.1.tgz", + "integrity": "sha512-ZEJNB41OBQQgGzgyInAv0UUfDDj3upmHydjieSxFvTRuZElrx7tXg/uVQ5hYVEwiXs3+aMsAeEc9X7xiSKCm4Q==", + "dev": true, + "dependencies": { + "@jest/environment": "^27.5.1", + "@jest/types": "^27.5.1", + "expect": "^27.5.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/@jest/reporters": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/@jest/reporters/-/reporters-27.5.1.tgz", + "integrity": "sha512-cPXh9hWIlVJMQkVk84aIvXuBB4uQQmFqZiacloFuGiP3ah1sbCxCosidXFDfqG8+6fO1oR2dTJTlsOy4VFmUfw==", + "dev": true, + "dependencies": { + "@bcoe/v8-coverage": "^0.2.3", + "@jest/console": "^27.5.1", + "@jest/test-result": "^27.5.1", + "@jest/transform": "^27.5.1", + "@jest/types": "^27.5.1", + "@types/node": "*", + "chalk": "^4.0.0", + "collect-v8-coverage": "^1.0.0", + "exit": "^0.1.2", + "glob": "^7.1.2", + "graceful-fs": "^4.2.9", + "istanbul-lib-coverage": "^3.0.0", + "istanbul-lib-instrument": "^5.1.0", + "istanbul-lib-report": "^3.0.0", + "istanbul-lib-source-maps": "^4.0.0", + "istanbul-reports": "^3.1.3", + "jest-haste-map": "^27.5.1", + "jest-resolve": "^27.5.1", + "jest-util": "^27.5.1", + "jest-worker": "^27.5.1", + "slash": "^3.0.0", + "source-map": "^0.6.0", + "string-length": "^4.0.1", + "terminal-link": "^2.0.0", + "v8-to-istanbul": "^8.1.0" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + }, + 
"peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/reporters/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@jest/reporters/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/@jest/reporters/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/@jest/reporters/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/@jest/reporters/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/reporters/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/schemas": { + "version": "28.1.3", + "resolved": "https://registry.npmmirror.com/@jest/schemas/-/schemas-28.1.3.tgz", + "integrity": "sha512-/l/VWsdt/aBXgjshLWOFyFt3IVdYypu5y2Wn2rOO1un6nkqIn8SLXzgIMYXFyYsRWDyF5EthmKJMIdJvk08grg==", + "dev": true, + "dependencies": { + "@sinclair/typebox": "^0.24.1" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || ^16.10.0 || >=17.0.0" + } + }, + "node_modules/@jest/source-map": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/@jest/source-map/-/source-map-27.5.1.tgz", + "integrity": "sha512-y9NIHUYF3PJRlHk98NdC/N1gl88BL08aQQgu4k4ZopQkCw9t9cV8mtl3TV8b/YCB8XaVTFrmUTAJvjsntDireg==", + "dev": true, + "dependencies": { + "callsites": "^3.0.0", + "graceful-fs": "^4.2.9", + "source-map": "^0.6.0" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/@jest/test-result": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/@jest/test-result/-/test-result-27.5.1.tgz", + "integrity": 
"sha512-EW35l2RYFUcUQxFJz5Cv5MTOxlJIQs4I7gxzi2zVU7PJhOwfYq1MdC5nhSmYjX1gmMmLPvB3sIaC+BkcHRBfag==", + "dev": true, + "dependencies": { + "@jest/console": "^27.5.1", + "@jest/types": "^27.5.1", + "@types/istanbul-lib-coverage": "^2.0.0", + "collect-v8-coverage": "^1.0.0" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/@jest/test-sequencer": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/@jest/test-sequencer/-/test-sequencer-27.5.1.tgz", + "integrity": "sha512-LCheJF7WB2+9JuCS7VB/EmGIdQuhtqjRNI9A43idHv3E4KltCTsPsLxvdaubFHSYwY/fNjMWjl6vNRhDiN7vpQ==", + "dev": true, + "dependencies": { + "@jest/test-result": "^27.5.1", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^27.5.1", + "jest-runtime": "^27.5.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/@jest/transform": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/@jest/transform/-/transform-27.5.1.tgz", + "integrity": "sha512-ipON6WtYgl/1329g5AIJVbUuEh0wZVbdpGwC99Jw4LwuoBNS95MVphU6zOeD9pDkon+LLbFL7lOQRapbB8SCHw==", + "dev": true, + "dependencies": { + "@babel/core": "^7.1.0", + "@jest/types": "^27.5.1", + "babel-plugin-istanbul": "^6.1.1", + "chalk": "^4.0.0", + "convert-source-map": "^1.4.0", + "fast-json-stable-stringify": "^2.0.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^27.5.1", + "jest-regex-util": "^27.5.1", + "jest-util": "^27.5.1", + "micromatch": "^4.0.4", + "pirates": "^4.0.4", + "slash": "^3.0.0", + "source-map": "^0.6.1", + "write-file-atomic": "^3.0.0" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/@jest/transform/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@jest/transform/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/@jest/transform/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/@jest/transform/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/@jest/transform/node_modules/convert-source-map": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/convert-source-map/-/convert-source-map-1.9.0.tgz", + "integrity": 
"sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==", + "dev": true + }, + "node_modules/@jest/transform/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/transform/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/types": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/@jest/types/-/types-27.5.1.tgz", + "integrity": "sha512-Cx46iJ9QpwQTjIdq5VJu2QTMMs3QlEjI0x1QbBP5W1+nMzyc2XmimiRR/CbX9TO0cPTeUlxWMOu8mslYsJ8DEw==", + "dev": true, + "dependencies": { + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^16.0.0", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/@jest/types/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@jest/types/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/@jest/types/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/@jest/types/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/@jest/types/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/types/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": 
"sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@josephg/resolvable": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/@josephg/resolvable/-/resolvable-1.0.1.tgz", + "integrity": "sha512-CtzORUwWTTOTqfVtHaKRJ0I1kNQd1bpn3sUh8I3nJDVY+5/M/Oe1DnEWzPQvqq/xPIIkzzzIP7mfCoAjFRvDhg==" + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.5", + "resolved": "https://registry.npmmirror.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", + "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", + "dependencies": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmmirror.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.2.1", + "resolved": "https://registry.npmmirror.com/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/source-map": { + "version": "0.3.6", + "resolved": "https://registry.npmmirror.com/@jridgewell/source-map/-/source-map-0.3.6.tgz", + "integrity": "sha512-1ZJTZebgqllO79ue2bm3rIGud/bOe0pP5BjSRCRxxYkEZS8STV7zN84UBbiYu7jy+eCKSnVIUgoWWE/tt+shMQ==", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmmirror.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.25", + "resolved": "https://registry.npmmirror.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@leichtgewicht/ip-codec": { + "version": "2.0.5", + "resolved": "https://registry.npmmirror.com/@leichtgewicht/ip-codec/-/ip-codec-2.0.5.tgz", + "integrity": "sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw==", + "dev": true + }, + "node_modules/@mapbox/node-pre-gyp": { + "version": "1.0.11", + "resolved": "https://registry.npmmirror.com/@mapbox/node-pre-gyp/-/node-pre-gyp-1.0.11.tgz", + "integrity": "sha512-Yhlar6v9WQgUp/He7BdgzOz8lqMQ8sU+jkCq7Wx8Myc5YFJLbEe7lgui/V7G1qB1DJykHSGwreceSaD60Y0PUQ==", + "optional": true, + "dependencies": { + "detect-libc": "^2.0.0", + "https-proxy-agent": "^5.0.0", + "make-dir": "^3.1.0", + "node-fetch": "^2.6.7", + "nopt": "^5.0.0", + "npmlog": "^5.0.1", + "rimraf": "^3.0.2", + "semver": "^7.3.5", + "tar": "^6.1.11" + }, + "bin": { + "node-pre-gyp": "bin/node-pre-gyp" + } + }, + 
"node_modules/@mapbox/node-pre-gyp/node_modules/abbrev": { + "version": "1.1.1", + "resolved": "https://registry.npmmirror.com/abbrev/-/abbrev-1.1.1.tgz", + "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", + "optional": true + }, + "node_modules/@mapbox/node-pre-gyp/node_modules/nopt": { + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/nopt/-/nopt-5.0.0.tgz", + "integrity": "sha512-Tbj67rffqceeLpcRXrT7vKAN8CwfPeIBgM7E6iBkmKLV7bEMwpGgYLGv0jACUsECaa/vuxP0IjEont6umdMgtQ==", + "optional": true, + "dependencies": { + "abbrev": "1" + }, + "bin": { + "nopt": "bin/nopt.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/@mapbox/node-pre-gyp/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "optional": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@node-ipc/js-queue": { + "version": "2.0.3", + "resolved": "https://registry.npmmirror.com/@node-ipc/js-queue/-/js-queue-2.0.3.tgz", + "integrity": "sha512-fL1wpr8hhD5gT2dA1qifeVaoDFlQR5es8tFuKqjHX+kdOtdNHnxkVZbtIrR2rxnMFvehkjaZRNV2H/gPXlb0hw==", + "dependencies": { + "easy-stack": "1.0.1" + }, + "engines": { + "node": ">=1.0.0" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmmirror.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmmirror.com/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmmirror.com/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@one-ini/wasm": { + "version": "0.1.1", + "resolved": "https://registry.npmmirror.com/@one-ini/wasm/-/wasm-0.1.1.tgz", + "integrity": "sha512-XuySG1E38YScSJoMlqovLru4KTUNSjgVTIjyh7qMX6aNN5HY5Ct5LhRJdxO79JtTzKfzV/bnWpz+zquYrISsvw==", + "dev": true + }, + "node_modules/@pdf-lib/standard-fonts": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/@pdf-lib/standard-fonts/-/standard-fonts-1.0.0.tgz", + "integrity": "sha512-hU30BK9IUN/su0Mn9VdlVKsWBS6GyhVfqjwl1FjZN4TxP6cCw0jP2w7V3Hf5uX7M0AZJ16vey9yE0ny7Sa59ZA==", + "dependencies": { + "pako": "^1.0.6" + } + }, + "node_modules/@pdf-lib/upng": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/@pdf-lib/upng/-/upng-1.0.1.tgz", + "integrity": "sha512-dQK2FUMQtowVP00mtIksrlZhdFXQZPC+taih1q4CvPZ5vqdxR/LKBaFg0oAfzd1GlHZXXSPdQfzQnt+ViGvEIQ==", + "dependencies": { + "pako": "^1.0.10" + } + }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmmirror.com/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": 
"sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "dev": true, + "optional": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/@polka/url": { + "version": "1.0.0-next.25", + "resolved": "https://registry.npmmirror.com/@polka/url/-/url-1.0.0-next.25.tgz", + "integrity": "sha512-j7P6Rgr3mmtdkeDGTe0E/aYyWEWVtc5yFXtHCRHs28/jptDEWfaVOc5T7cblqy1XKPPfCxJc/8DwQ5YgLOZOVQ==", + "dev": true + }, + "node_modules/@popperjs/core": { + "name": "@sxzz/popperjs-es", + "version": "2.11.7", + "resolved": "https://registry.npmmirror.com/@sxzz/popperjs-es/-/popperjs-es-2.11.7.tgz", + "integrity": "sha512-Ccy0NlLkzr0Ex2FKvh2X+OyERHXJ88XJ1MXtsI9y9fGexlaXaVTPzBCRBwIxFkORuOb+uBqeu+RqnpgYTEZRUQ==" + }, + "node_modules/@protobufjs/aspromise": { + "version": "1.1.2", + "resolved": "https://registry.npmmirror.com/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", + "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==" + }, + "node_modules/@protobufjs/base64": { + "version": "1.1.2", + "resolved": "https://registry.npmmirror.com/@protobufjs/base64/-/base64-1.1.2.tgz", + "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==" + }, + "node_modules/@protobufjs/codegen": { + "version": "2.0.4", + "resolved": "https://registry.npmmirror.com/@protobufjs/codegen/-/codegen-2.0.4.tgz", + "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==" + }, + "node_modules/@protobufjs/eventemitter": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", + "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==" + }, + "node_modules/@protobufjs/fetch": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/@protobufjs/fetch/-/fetch-1.1.0.tgz", + "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", + "dependencies": { + "@protobufjs/aspromise": "^1.1.1", + "@protobufjs/inquire": "^1.1.0" + } + }, + "node_modules/@protobufjs/float": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/@protobufjs/float/-/float-1.0.2.tgz", + "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==" + }, + "node_modules/@protobufjs/inquire": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/@protobufjs/inquire/-/inquire-1.1.0.tgz", + "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==" + }, + "node_modules/@protobufjs/path": { + "version": "1.1.2", + "resolved": "https://registry.npmmirror.com/@protobufjs/path/-/path-1.1.2.tgz", + "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==" + }, + "node_modules/@protobufjs/pool": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/@protobufjs/pool/-/pool-1.1.0.tgz", + "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==" + }, + "node_modules/@protobufjs/utf8": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/@protobufjs/utf8/-/utf8-1.1.0.tgz", + "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==" + }, + "node_modules/@sideway/address": { + "version": 
"4.1.5", + "resolved": "https://registry.npmmirror.com/@sideway/address/-/address-4.1.5.tgz", + "integrity": "sha512-IqO/DUQHUkPeixNQ8n0JA6102hT9CmaljNTPmQ1u8MEhBo/R4Q8eKLN/vGZxuebwOroDB4cbpjheD4+/sKFK4Q==", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@sideway/formula": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/@sideway/formula/-/formula-3.0.1.tgz", + "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==" + }, + "node_modules/@sideway/pinpoint": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", + "integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==" + }, + "node_modules/@simonwep/pickr": { + "version": "1.8.2", + "resolved": "https://registry.npmmirror.com/@simonwep/pickr/-/pickr-1.8.2.tgz", + "integrity": "sha512-/l5w8BIkrpP6n1xsetx9MWPWlU6OblN5YgZZphxan0Tq4BByTCETL6lyIeY8lagalS2Nbt4F2W034KHLIiunKA==", + "dependencies": { + "core-js": "^3.15.1", + "nanopop": "^2.1.0" + } + }, + "node_modules/@sinclair/typebox": { + "version": "0.24.51", + "resolved": "https://registry.npmmirror.com/@sinclair/typebox/-/typebox-0.24.51.tgz", + "integrity": "sha512-1P1OROm/rdubP5aFDSZQILU0vrLCJ4fvHt6EoqHEM+2D/G5MK3bIaymUKLit8Js9gbns5UyJnkP/TZROLw4tUA==", + "dev": true + }, + "node_modules/@sindresorhus/is": { + "version": "0.7.0", + "resolved": "https://registry.npmmirror.com/@sindresorhus/is/-/is-0.7.0.tgz", + "integrity": "sha512-ONhaKPIufzzrlNbqtWFFd+jlnemX6lJAgq9ZeiZtS7I1PIf/la7CW4m83rTXRnVnsMbW2k56pGYu7AUFJD9Pow==", + "engines": { + "node": ">=4" + } + }, + "node_modules/@sinonjs/commons": { + "version": "1.8.6", + "resolved": "https://registry.npmmirror.com/@sinonjs/commons/-/commons-1.8.6.tgz", + "integrity": "sha512-Ky+XkAkqPZSm3NLBeUng77EBQl3cmeJhITaGHdYH8kjVB+aun3S4XBRti2zt17mtt0mIUDiNxYeoJm6drVvBJQ==", + "dev": true, + "dependencies": { + "type-detect": "4.0.8" + } + }, + "node_modules/@sinonjs/fake-timers": { + "version": "8.1.0", + "resolved": "https://registry.npmmirror.com/@sinonjs/fake-timers/-/fake-timers-8.1.0.tgz", + "integrity": "sha512-OAPJUAtgeINhh/TAlUID4QTs53Njm7xzddaVlEs/SXwgtiD1tW22zAB/W1wdqfrpmikgaWQ9Fw6Ws+hsiRm5Vg==", + "dev": true, + "dependencies": { + "@sinonjs/commons": "^1.7.0" + } + }, + "node_modules/@soda/friendly-errors-webpack-plugin": { + "version": "1.8.1", + "resolved": "https://registry.npmmirror.com/@soda/friendly-errors-webpack-plugin/-/friendly-errors-webpack-plugin-1.8.1.tgz", + "integrity": "sha512-h2ooWqP8XuFqTXT+NyAFbrArzfQA7R6HTezADrvD9Re8fxMLTPPniLdqVTdDaO0eIoLaAwKT+d6w+5GeTk7Vbg==", + "dev": true, + "dependencies": { + "chalk": "^3.0.0", + "error-stack-parser": "^2.0.6", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8.0.0" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/@soda/friendly-errors-webpack-plugin/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@soda/friendly-errors-webpack-plugin/node_modules/chalk": { + "version": "3.0.0", + "resolved": 
"https://registry.npmmirror.com/chalk/-/chalk-3.0.0.tgz", + "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@soda/friendly-errors-webpack-plugin/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/@soda/friendly-errors-webpack-plugin/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/@soda/friendly-errors-webpack-plugin/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@soda/friendly-errors-webpack-plugin/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@soda/get-current-script": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/@soda/get-current-script/-/get-current-script-1.0.2.tgz", + "integrity": "sha512-T7VNNlYVM1SgQ+VsMYhnDkcGmWhQdL0bDyGm5TlQ3GBXnJscEClUUOKduWTmm2zCnvNLC1hc3JpuXjs/nFOc5w==", + "dev": true + }, + "node_modules/@tootallnate/once": { + "version": "1.1.2", + "resolved": "https://registry.npmmirror.com/@tootallnate/once/-/once-1.1.2.tgz", + "integrity": "sha512-RbzJvlNzmRq5c3O09UipeuXno4tA1FE6ikOjxZK0tuxVv3412l64l5t1W5pj4+rJq9vpkm/kwiR07aZXnsKPxw==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, + "node_modules/@trysound/sax": { + "version": "0.2.0", + "resolved": "https://registry.npmmirror.com/@trysound/sax/-/sax-0.2.0.tgz", + "integrity": "sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==", + "dev": true, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/@types/accepts": { + "version": "1.3.7", + "resolved": "https://registry.npmmirror.com/@types/accepts/-/accepts-1.3.7.tgz", + "integrity": "sha512-Pay9fq2lM2wXPWbteBsRAGiWH2hig4ZE2asK+mm7kUzlxRTfL961rj89I6zV/E3PcIkDqyuBEcMxFT7rccugeQ==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmmirror.com/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + 
"version": "7.6.8", + "resolved": "https://registry.npmmirror.com/@types/babel__generator/-/babel__generator-7.6.8.tgz", + "integrity": "sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw==", + "dev": true, + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmmirror.com/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.20.5", + "resolved": "https://registry.npmmirror.com/@types/babel__traverse/-/babel__traverse-7.20.5.tgz", + "integrity": "sha512-WXCyOcRtH37HAUkpXhUduaxdm82b4GSlyTqajXviN4EfiuPgNYR109xMCKvpl6zPIpua0DGlMEDCq+g8EdoheQ==", + "dev": true, + "dependencies": { + "@babel/types": "^7.20.7" + } + }, + "node_modules/@types/body-parser": { + "version": "1.19.2", + "resolved": "https://registry.npmmirror.com/@types/body-parser/-/body-parser-1.19.2.tgz", + "integrity": "sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==", + "dependencies": { + "@types/connect": "*", + "@types/node": "*" + } + }, + "node_modules/@types/bonjour": { + "version": "3.5.13", + "resolved": "https://registry.npmmirror.com/@types/bonjour/-/bonjour-3.5.13.tgz", + "integrity": "sha512-z9fJ5Im06zvUL548KvYNecEVlA7cVDkGUi6kZusb04mpyEFKCIZJvloCcmpmLaIahDpOQGHaHmG6imtPMmPXGQ==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect": { + "version": "3.4.38", + "resolved": "https://registry.npmmirror.com/@types/connect/-/connect-3.4.38.tgz", + "integrity": "sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect-history-api-fallback": { + "version": "1.5.4", + "resolved": "https://registry.npmmirror.com/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.5.4.tgz", + "integrity": "sha512-n6Cr2xS1h4uAulPRdlw6Jl6s1oG8KrVilPN2yUITEs+K48EzMJJ3W1xy8K5eWuFvjp3R74AOIGSmp2UfBJ8HFw==", + "dev": true, + "dependencies": { + "@types/express-serve-static-core": "*", + "@types/node": "*" + } + }, + "node_modules/@types/cors": { + "version": "2.8.12", + "resolved": "https://registry.npmmirror.com/@types/cors/-/cors-2.8.12.tgz", + "integrity": "sha512-vt+kDhq/M2ayberEtJcIN/hxXy1Pk+59g2FV/ZQceeaTyCtCucjL2Q7FXlFjtWn4n15KCr1NE2lNNFhp0lEThw==" + }, + "node_modules/@types/ejs": { + "version": "3.1.5", + "resolved": "https://registry.npmmirror.com/@types/ejs/-/ejs-3.1.5.tgz", + "integrity": "sha512-nv+GSx77ZtXiJzwKdsASqi+YQ5Z7vwHsTP0JY2SiQgjGckkBRKZnk8nIM+7oUZ1VCtuTz0+By4qVR7fqzp/Dfg==" + }, + "node_modules/@types/eslint": { + "version": "8.56.10", + "resolved": "https://registry.npmmirror.com/@types/eslint/-/eslint-8.56.10.tgz", + "integrity": "sha512-Shavhk87gCtY2fhXDctcfS3e6FdxWkCx1iUZ9eEUbh7rTqlZT0/IzOkCOVt0fCjcFuZ9FPYfuezTBImfHCDBGQ==", + "dependencies": { + "@types/estree": "*", + "@types/json-schema": "*" + } + }, + "node_modules/@types/eslint-scope": { + "version": "3.7.7", + "resolved": "https://registry.npmmirror.com/@types/eslint-scope/-/eslint-scope-3.7.7.tgz", + "integrity": "sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==", + "dependencies": { + 
"@types/eslint": "*", + "@types/estree": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.5", + "resolved": "https://registry.npmmirror.com/@types/estree/-/estree-1.0.5.tgz", + "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==" + }, + "node_modules/@types/express": { + "version": "4.17.14", + "resolved": "https://registry.npmmirror.com/@types/express/-/express-4.17.14.tgz", + "integrity": "sha512-TEbt+vaPFQ+xpxFLFssxUDXj5cWCxZJjIcB7Yg0k0GMHGtgtQgpvx/MUQUeAkNbA9AAGrwkAsoeItdTgS7FMyg==", + "dependencies": { + "@types/body-parser": "*", + "@types/express-serve-static-core": "^4.17.18", + "@types/qs": "*", + "@types/serve-static": "*" + } + }, + "node_modules/@types/express-serve-static-core": { + "version": "4.17.31", + "resolved": "https://registry.npmmirror.com/@types/express-serve-static-core/-/express-serve-static-core-4.17.31.tgz", + "integrity": "sha512-DxMhY+NAsTwMMFHBTtJFNp5qiHKJ7TeqOo23zVEM9alT1Ml27Q3xcTH0xwxn7Q0BbMcVEJOs/7aQtUWupUQN3Q==", + "dependencies": { + "@types/node": "*", + "@types/qs": "*", + "@types/range-parser": "*" + } + }, + "node_modules/@types/graceful-fs": { + "version": "4.1.9", + "resolved": "https://registry.npmmirror.com/@types/graceful-fs/-/graceful-fs-4.1.9.tgz", + "integrity": "sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmmirror.com/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg==", + "dev": true + }, + "node_modules/@types/http-errors": { + "version": "2.0.4", + "resolved": "https://registry.npmmirror.com/@types/http-errors/-/http-errors-2.0.4.tgz", + "integrity": "sha512-D0CFMMtydbJAegzOyHjtiKPLlvnm3iTZyZRSZoLq2mRhDdmLfIWOCYPfQJ4cu2erKghU++QvjcUjp/5h7hESpA==" + }, + "node_modules/@types/http-proxy": { + "version": "1.17.14", + "resolved": "https://registry.npmmirror.com/@types/http-proxy/-/http-proxy-1.17.14.tgz", + "integrity": "sha512-SSrD0c1OQzlFX7pGu1eXxSEjemej64aaNPRhhVYUGqXh0BtldAAx37MG8btcumvpgKyZp1F5Gn3JkktdxiFv6w==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/inquirer": { + "version": "8.2.10", + "resolved": "https://registry.npmmirror.com/@types/inquirer/-/inquirer-8.2.10.tgz", + "integrity": "sha512-IdD5NmHyVjWM8SHWo/kPBgtzXatwPkfwzyP3fN1jF2g9BWt5WO+8hL2F4o2GKIYsU40PpqeevuUWvkS/roXJkA==", + "dependencies": { + "@types/through": "*", + "rxjs": "^7.2.0" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.6", + "resolved": "https://registry.npmmirror.com/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", + "dev": true + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.3", + "resolved": "https://registry.npmmirror.com/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", + "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", + "dev": true, + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.4", + "resolved": 
"https://registry.npmmirror.com/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", + "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", + "dev": true, + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/jest": { + "version": "27.5.2", + "resolved": "https://registry.npmmirror.com/@types/jest/-/jest-27.5.2.tgz", + "integrity": "sha512-mpT8LJJ4CMeeahobofYWIjFo0xonRS/HfxnVEPMPFSQdGUt1uHCnoPT7Zhb+sjDU2wz0oKV0OLUR0WzrHNgfeA==", + "dev": true, + "dependencies": { + "jest-matcher-utils": "^27.0.0", + "pretty-format": "^27.0.0" + } + }, + "node_modules/@types/jscodeshift": { + "version": "0.7.2", + "resolved": "https://registry.npmmirror.com/@types/jscodeshift/-/jscodeshift-0.7.2.tgz", + "integrity": "sha512-k4ih8ayQ65e26vhCxeMTKtZ808DzC0RFQ4unBvPEy9bcFhS4aPm3oXgWWZNmZ4u+H2WzHQDCNrRC5iNX+afiZw==", + "dependencies": { + "ast-types": "0.12.1", + "recast": "0.17.2" + } + }, + "node_modules/@types/jscodeshift/node_modules/ast-types": { + "version": "0.12.1", + "resolved": "https://registry.npmmirror.com/ast-types/-/ast-types-0.12.1.tgz", + "integrity": "sha512-H2izJAyT2xwew4TxShpmxe6f9R5hHgJQy1QloLiUC2yrJMtyraBWNJL7903rpeCY9keNUipORR/zIUC2XcYKng==", + "engines": { + "node": ">=4" + } + }, + "node_modules/@types/jscodeshift/node_modules/recast": { + "version": "0.17.2", + "resolved": "https://registry.npmmirror.com/recast/-/recast-0.17.2.tgz", + "integrity": "sha512-YHFvn4rBXl8eIjALjUiOV/AP3xFpyGNGNHDw9mAncAWuIdgnBKjbZQ9+P3VlsKcNaNapRVFlTEX1dvDRlYwyxg==", + "dependencies": { + "ast-types": "0.12.1", + "esprima": "~4.0.0", + "private": "~0.1.5", + "source-map": "~0.6.1" + }, + "engines": { + "node": ">= 4" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmmirror.com/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==" + }, + "node_modules/@types/lodash": { + "version": "4.17.4", + "resolved": "https://registry.npmmirror.com/@types/lodash/-/lodash-4.17.4.tgz", + "integrity": "sha512-wYCP26ZLxaT3R39kiN2+HcJ4kTd3U1waI/cY7ivWYqFP6pW3ZNpvi6Wd6PHZx7T/t8z0vlkXMg3QYLa7DZ/IJQ==" + }, + "node_modules/@types/lodash-es": { + "version": "4.17.12", + "resolved": "https://registry.npmmirror.com/@types/lodash-es/-/lodash-es-4.17.12.tgz", + "integrity": "sha512-0NgftHUcV4v34VhXm8QBSftKVXtbkBG3ViCjs6+eJ5a6y6Mi/jiFGPc1sC7QK+9BFhWrURE3EOggmWaSxL9OzQ==", + "dependencies": { + "@types/lodash": "*" + } + }, + "node_modules/@types/long": { + "version": "4.0.2", + "resolved": "https://registry.npmmirror.com/@types/long/-/long-4.0.2.tgz", + "integrity": "sha512-MqTGEo5bj5t157U6fA/BiDynNkn0YknVdh48CMPkTSpFTVmvao5UQmm7uEF6xBEo7qIMAlY/JSleYaE6VOdpaA==" + }, + "node_modules/@types/mime": { + "version": "1.3.5", + "resolved": "https://registry.npmmirror.com/@types/mime/-/mime-1.3.5.tgz", + "integrity": "sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==" + }, + "node_modules/@types/minimist": { + "version": "1.2.5", + "resolved": "https://registry.npmmirror.com/@types/minimist/-/minimist-1.2.5.tgz", + "integrity": "sha512-hov8bUuiLiyFPGyFPE1lwWhmzYbirOXQNNo40+y3zow8aFVTeyn3VWL0VFFfdNddA8S4Vf0Tc062rzyNr7Paag==", + "dev": true + }, + "node_modules/@types/node": { + "version": "20.12.12", + "resolved": "https://registry.npmmirror.com/@types/node/-/node-20.12.12.tgz", + "integrity": 
"sha512-eWLDGF/FOSPtAvEqeRAQ4C8LSA7M1I7i0ky1I8U7kD1J5ITyW3AsRhQrKVoWf5pFKZ2kILsEGJhsI9r93PYnOw==", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/@types/node-forge": { + "version": "1.3.11", + "resolved": "https://registry.npmmirror.com/@types/node-forge/-/node-forge-1.3.11.tgz", + "integrity": "sha512-FQx220y22OKNTqaByeBGqHWYz4cl94tpcxeFdvBo3wjG6XPBuZ0BNgNZRV5J5TFmmcsJ4IzsLkmGRiQbnYsBEQ==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/normalize-package-data": { + "version": "2.4.4", + "resolved": "https://registry.npmmirror.com/@types/normalize-package-data/-/normalize-package-data-2.4.4.tgz", + "integrity": "sha512-37i+OaWTh9qeK4LSHPsyRC7NahnGotNuZvjLSgcPzblpHB3rrCJxAOgI5gCdKm7coonsaX1Of0ILiTcnZjbfxA==" + }, + "node_modules/@types/parse-json": { + "version": "4.0.2", + "resolved": "https://registry.npmmirror.com/@types/parse-json/-/parse-json-4.0.2.tgz", + "integrity": "sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==", + "dev": true + }, + "node_modules/@types/pdfjs-dist": { + "version": "2.10.378", + "resolved": "https://registry.npmmirror.com/@types/pdfjs-dist/-/pdfjs-dist-2.10.378.tgz", + "integrity": "sha512-TRdIPqdsvKmPla44kVy4jv5Nt5vjMfVjbIEke1CRULIrwKNRC4lIiZvNYDJvbUMNCFPNIUcOKhXTyMJrX18IMA==", + "deprecated": "This is a stub types definition. pdfjs-dist provides its own type definitions, so you do not need this installed.", + "dependencies": { + "pdfjs-dist": "*" + } + }, + "node_modules/@types/pdfobject": { + "version": "2.2.5", + "resolved": "https://registry.npmmirror.com/@types/pdfobject/-/pdfobject-2.2.5.tgz", + "integrity": "sha512-7gD5tqc/RUDq0PyoLemL0vEHxBYi+zY0WVaFAx/Y0jBsXFgot1vB9No1GhDZGwRGJMCIZbgAb74QG9MTyTNU/g==", + "dev": true + }, + "node_modules/@types/prettier": { + "version": "2.7.3", + "resolved": "https://registry.npmmirror.com/@types/prettier/-/prettier-2.7.3.tgz", + "integrity": "sha512-+68kP9yzs4LMp7VNh8gdzMSPZFL44MLGqiHWvttYJe+6qnuVr4Ek9wSBQoveqY/r+LwjCcU29kNVkidwim+kYA==", + "dev": true + }, + "node_modules/@types/qs": { + "version": "6.9.15", + "resolved": "https://registry.npmmirror.com/@types/qs/-/qs-6.9.15.tgz", + "integrity": "sha512-uXHQKES6DQKKCLh441Xv/dwxOq1TVS3JPUMlEqoEglvlhR6Mxnlew/Xq/LRVHpLyk7iK3zODe1qYHIMltO7XGg==" + }, + "node_modules/@types/range-parser": { + "version": "1.2.7", + "resolved": "https://registry.npmmirror.com/@types/range-parser/-/range-parser-1.2.7.tgz", + "integrity": "sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==" + }, + "node_modules/@types/retry": { + "version": "0.12.0", + "resolved": "https://registry.npmmirror.com/@types/retry/-/retry-0.12.0.tgz", + "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==", + "dev": true + }, + "node_modules/@types/semver": { + "version": "7.5.8", + "resolved": "https://registry.npmmirror.com/@types/semver/-/semver-7.5.8.tgz", + "integrity": "sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ==", + "dev": true + }, + "node_modules/@types/send": { + "version": "0.17.4", + "resolved": "https://registry.npmmirror.com/@types/send/-/send-0.17.4.tgz", + "integrity": "sha512-x2EM6TJOybec7c52BX0ZspPodMsQUd5L6PRwOunVyVUhXiBSKf3AezDL8Dgvgt5o0UfKNfuA0eMLr2wLT4AiBA==", + "dependencies": { + "@types/mime": "^1", + "@types/node": "*" + } + }, + "node_modules/@types/serve-index": { + "version": "1.9.4", + "resolved": 
"https://registry.npmmirror.com/@types/serve-index/-/serve-index-1.9.4.tgz", + "integrity": "sha512-qLpGZ/c2fhSs5gnYsQxtDEq3Oy8SXPClIXkW5ghvAvsNuVSA8k+gCONcUCS/UjLEYvYps+e8uBtfgXgvhwfNug==", + "dev": true, + "dependencies": { + "@types/express": "*" + } + }, + "node_modules/@types/serve-static": { + "version": "1.15.7", + "resolved": "https://registry.npmmirror.com/@types/serve-static/-/serve-static-1.15.7.tgz", + "integrity": "sha512-W8Ym+h8nhuRwaKPaDw34QUkwsGi6Rc4yYqvKFo5rm2FUEhCFbzVWrxXUxuKK8TASjWsysJY0nsmNCGhCOIsrOw==", + "dependencies": { + "@types/http-errors": "*", + "@types/node": "*", + "@types/send": "*" + } + }, + "node_modules/@types/sockjs": { + "version": "0.3.36", + "resolved": "https://registry.npmmirror.com/@types/sockjs/-/sockjs-0.3.36.tgz", + "integrity": "sha512-MK9V6NzAS1+Ud7JV9lJLFqW85VbC9dq3LmwZCuBe4wBDgKC0Kj/jd8Xl+nSviU+Qc3+m7umHHyHg//2KSa0a0Q==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/stack-utils": { + "version": "2.0.3", + "resolved": "https://registry.npmmirror.com/@types/stack-utils/-/stack-utils-2.0.3.tgz", + "integrity": "sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==", + "dev": true + }, + "node_modules/@types/strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/@types/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha512-xevGOReSYGM7g/kUBZzPqCrR/KYAo+F0yiPc85WFTJa0MSLtyFTVTU6cJu/aV4mid7IffDIWqo69THF2o4JiEQ==", + "dev": true + }, + "node_modules/@types/strip-json-comments": { + "version": "0.0.30", + "resolved": "https://registry.npmmirror.com/@types/strip-json-comments/-/strip-json-comments-0.0.30.tgz", + "integrity": "sha512-7NQmHra/JILCd1QqpSzl8+mJRc8ZHz3uDm8YV1Ks9IhK0epEiTw8aIErbvH9PI+6XbqhyIQy3462nEsn7UVzjQ==", + "dev": true + }, + "node_modules/@types/through": { + "version": "0.0.33", + "resolved": "https://registry.npmmirror.com/@types/through/-/through-0.0.33.tgz", + "integrity": "sha512-HsJ+z3QuETzP3cswwtzt2vEIiHBk/dCcHGhbmG5X3ecnwFD/lPrMpliGXxSCg03L9AhrdwA4Oz/qfspkDW+xGQ==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/web-bluetooth": { + "version": "0.0.16", + "resolved": "https://registry.npmmirror.com/@types/web-bluetooth/-/web-bluetooth-0.0.16.tgz", + "integrity": "sha512-oh8q2Zc32S6gd/j50GowEjKLoOVOwHP/bWVjKJInBwQqdOYMdPrf1oVlelTlyfFK3CKxL1uahMDAr+vy8T7yMQ==" + }, + "node_modules/@types/webpack-env": { + "version": "1.18.5", + "resolved": "https://registry.npmmirror.com/@types/webpack-env/-/webpack-env-1.18.5.tgz", + "integrity": "sha512-wz7kjjRRj8/Lty4B+Kr0LN6Ypc/3SymeCCGSbaXp2leH0ZVg/PriNiOwNj4bD4uphI7A8NXS4b6Gl373sfO5mA==", + "dev": true + }, + "node_modules/@types/websocket": { + "version": "1.0.10", + "resolved": "https://registry.npmmirror.com/@types/websocket/-/websocket-1.0.10.tgz", + "integrity": "sha512-svjGZvPB7EzuYS94cI7a+qhwgGU1y89wUgjT6E2wVUfmAGIvRfT7obBvRtnhXCSsoMdlG4gBFGE7MfkIXZLoww==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/ws": { + "version": "8.5.10", + "resolved": "https://registry.npmmirror.com/@types/ws/-/ws-8.5.10.tgz", + "integrity": "sha512-vmQSUcfalpIq0R9q7uTo2lXs6eGIpt9wtnLdMv9LVpIjCA/+ufZRozlVoVelIYixx1ugCBKDhn89vnsEGOCx9A==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/yargs": { + "version": "16.0.9", + "resolved": "https://registry.npmmirror.com/@types/yargs/-/yargs-16.0.9.tgz", + "integrity": 
"sha512-tHhzvkFXZQeTECenFoRljLBYPZJ7jAVxqqtEI0qTLOmuultnFp4I9yKE17vTuhf7BkhCu7I4XuemPgikDVuYqA==", + "dev": true, + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.3", + "resolved": "https://registry.npmmirror.com/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", + "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", + "dev": true + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "5.62.0", + "resolved": "https://registry.npmmirror.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.62.0.tgz", + "integrity": "sha512-TiZzBSJja/LbhNPvk6yc0JrX9XqhQ0hdh6M2svYfsHGejaKFIAGd9MQ+ERIMzLGlN/kZoYIgdxFV0PuljTKXag==", + "dev": true, + "dependencies": { + "@eslint-community/regexpp": "^4.4.0", + "@typescript-eslint/scope-manager": "5.62.0", + "@typescript-eslint/type-utils": "5.62.0", + "@typescript-eslint/utils": "5.62.0", + "debug": "^4.3.4", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "natural-compare-lite": "^1.4.0", + "semver": "^7.3.7", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^5.0.0", + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "5.62.0", + "resolved": "https://registry.npmmirror.com/@typescript-eslint/parser/-/parser-5.62.0.tgz", + "integrity": "sha512-VlJEV0fOQ7BExOsHYAGrgbEiZoi8D+Bl2+f6V2RrXerRSylnp+ZBHmPvaIa8cz0Ajx7WO7Z5RqfgYg7ED1nRhA==", + "dev": true, + "dependencies": { + "@typescript-eslint/scope-manager": "5.62.0", + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/typescript-estree": "5.62.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "5.62.0", + "resolved": "https://registry.npmmirror.com/@typescript-eslint/scope-manager/-/scope-manager-5.62.0.tgz", + "integrity": "sha512-VXuvVvZeQCQb5Zgf4HAxc04q5j+WrNAtNh9OwCsCgpKqESMTu3tF/jhZ3xG6T4NZwWl65Bg8KuS2uEvhSfLl0w==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/visitor-keys": "5.62.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "5.62.0", + "resolved": "https://registry.npmmirror.com/@typescript-eslint/type-utils/-/type-utils-5.62.0.tgz", + "integrity": "sha512-xsSQreu+VnfbqQpW5vnCJdq1Z3Q0U31qiWmRhr98ONQmcp/yhiPJFPq8MXiJVLiksmOKSjIldZzkebzHuCGzew==", + "dev": 
true, + "dependencies": { + "@typescript-eslint/typescript-estree": "5.62.0", + "@typescript-eslint/utils": "5.62.0", + "debug": "^4.3.4", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "*" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/types": { + "version": "5.62.0", + "resolved": "https://registry.npmmirror.com/@typescript-eslint/types/-/types-5.62.0.tgz", + "integrity": "sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "5.62.0", + "resolved": "https://registry.npmmirror.com/@typescript-eslint/typescript-estree/-/typescript-estree-5.62.0.tgz", + "integrity": "sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/visitor-keys": "5.62.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "semver": "^7.3.7", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "5.62.0", + "resolved": "https://registry.npmmirror.com/@typescript-eslint/utils/-/utils-5.62.0.tgz", + "integrity": "sha512-n8oxjeb5aIbPFEtmQxQYOLI0i9n5ySBEY/ZEHHZqKQSFnxio1rv6dthascc9dLuwrL0RC5mPCxB7vnAVGAYWAQ==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@types/json-schema": "^7.0.9", + "@types/semver": "^7.3.12", + "@typescript-eslint/scope-manager": "5.62.0", + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/typescript-estree": "5.62.0", + "eslint-scope": "^5.1.1", + "semver": "^7.3.7" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "5.62.0", + "resolved": "https://registry.npmmirror.com/@typescript-eslint/visitor-keys/-/visitor-keys-5.62.0.tgz", + "integrity": 
"sha512-07ny+LHRzQXepkGg6w0mFY41fVUNBrL2Roj/++7V1txKugfjm/Ci/qSND03r2RhlJhJYMcTn9AhhSSqQp0Ysyw==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.62.0", + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@vue/cli": { + "version": "5.0.8", + "resolved": "https://registry.npmmirror.com/@vue/cli/-/cli-5.0.8.tgz", + "integrity": "sha512-c/QKPdC09bYkW22m/boXkLaiz10z0Z2WHZO7zEeNdfSduqyWINZhKc6hVQU3Vk0NXW7BJAd7zWmcUrC8L9TuAA==", + "dependencies": { + "@types/ejs": "^3.0.6", + "@types/inquirer": "^8.1.3", + "@vue/cli-shared-utils": "^5.0.8", + "@vue/cli-ui": "^5.0.8", + "@vue/cli-ui-addon-webpack": "^5.0.8", + "@vue/cli-ui-addon-widgets": "^5.0.8", + "boxen": "^5.0.0", + "commander": "^7.1.0", + "debug": "^4.1.0", + "deepmerge": "^4.2.2", + "download-git-repo": "^3.0.2", + "ejs": "^3.1.6", + "envinfo": "^7.7.4", + "fs-extra": "^9.1.0", + "globby": "^11.0.2", + "import-global": "^0.1.0", + "ini": "^2.0.0", + "inquirer": "^8.0.0", + "isbinaryfile": "^4.0.6", + "javascript-stringify": "^2.0.1", + "js-yaml": "^4.0.0", + "leven": "^3.1.0", + "lodash.clonedeep": "^4.5.0", + "lru-cache": "^6.0.0", + "minimist": "^1.2.5", + "pkg-dir": "^5.0.0", + "recast": "^0.20.3", + "resolve": "^1.20.0", + "shortid": "^2.2.15", + "slash": "^3.0.0", + "strip-ansi": "^6.0.0", + "validate-npm-package-name": "^3.0.0", + "vue": "^2.6.14", + "vue-codemod": "^0.0.5", + "yaml-front-matter": "^4.1.0" + }, + "bin": { + "vue": "bin/vue.js" + }, + "engines": { + "node": "^12.0.0 || >= 14.0.0" + } + }, + "node_modules/@vue/cli-overlay": { + "version": "5.0.8", + "resolved": "https://registry.npmmirror.com/@vue/cli-overlay/-/cli-overlay-5.0.8.tgz", + "integrity": "sha512-KmtievE/B4kcXp6SuM2gzsnSd8WebkQpg3XaB6GmFh1BJGRqa1UiW9up7L/Q67uOdTigHxr5Ar2lZms4RcDjwQ==", + "dev": true + }, + "node_modules/@vue/cli-plugin-eslint": { + "version": "5.0.8", + "resolved": "https://registry.npmmirror.com/@vue/cli-plugin-eslint/-/cli-plugin-eslint-5.0.8.tgz", + "integrity": "sha512-d11+I5ONYaAPW1KyZj9GlrV/E6HZePq5L5eAF5GgoVdu6sxr6bDgEoxzhcS1Pk2eh8rn1MxG/FyyR+eCBj/CNg==", + "dev": true, + "dependencies": { + "@vue/cli-shared-utils": "^5.0.8", + "eslint-webpack-plugin": "^3.1.0", + "globby": "^11.0.2", + "webpack": "^5.54.0", + "yorkie": "^2.0.0" + }, + "peerDependencies": { + "@vue/cli-service": "^3.0.0 || ^4.0.0 || ^5.0.0-0", + "eslint": ">=7.5.0" + } + }, + "node_modules/@vue/cli-plugin-router": { + "version": "5.0.8", + "resolved": "https://registry.npmmirror.com/@vue/cli-plugin-router/-/cli-plugin-router-5.0.8.tgz", + "integrity": "sha512-Gmv4dsGdAsWPqVijz3Ux2OS2HkMrWi1ENj2cYL75nUeL+Xj5HEstSqdtfZ0b1q9NCce+BFB6QnHfTBXc/fCvMg==", + "dev": true, + "dependencies": { + "@vue/cli-shared-utils": "^5.0.8" + }, + "peerDependencies": { + "@vue/cli-service": "^3.0.0 || ^4.0.0 || ^5.0.0-0" + } + }, + "node_modules/@vue/cli-plugin-typescript": { + "version": "5.0.8", + "resolved": "https://registry.npmmirror.com/@vue/cli-plugin-typescript/-/cli-plugin-typescript-5.0.8.tgz", + "integrity": "sha512-JKJOwzJshBqsmp4yLBexwVMebOZ4VGJgbnYvmHVxasJOStF2RxwyW28ZF+zIvASGdat4sAUuo/3mAQyVhm7JHg==", + "dev": true, + "dependencies": { + "@babel/core": "^7.12.16", + "@types/webpack-env": "^1.15.2", + "@vue/cli-shared-utils": "^5.0.8", + "babel-loader": "^8.2.2", + "fork-ts-checker-webpack-plugin": "^6.4.0", + "globby": "^11.0.2", + "thread-loader": "^3.0.0", + "ts-loader": 
"^9.2.5", + "webpack": "^5.54.0" + }, + "peerDependencies": { + "@vue/cli-service": "^3.0.0 || ^4.0.0 || ^5.0.0-0", + "cache-loader": "^4.1.0", + "typescript": ">=2", + "vue": "^2 || ^3.2.13", + "vue-template-compiler": "^2.0.0" + }, + "peerDependenciesMeta": { + "cache-loader": { + "optional": true + }, + "vue-template-compiler": { + "optional": true + } + } + }, + "node_modules/@vue/cli-plugin-unit-jest": { + "version": "5.0.8", + "resolved": "https://registry.npmmirror.com/@vue/cli-plugin-unit-jest/-/cli-plugin-unit-jest-5.0.8.tgz", + "integrity": "sha512-8aTmXUxEUdhJEjMHHoHI1wgi2SHzVRgCQQWIn5lgCAV2xJnXng09+wv8Ap0dhO4Z5vOOA/7xnubMQ9pDLqiskg==", + "dev": true, + "dependencies": { + "@babel/core": "^7.12.16", + "@babel/plugin-transform-modules-commonjs": "^7.15.0", + "@types/jest": "^27.0.1", + "@vue/cli-shared-utils": "^5.0.8", + "babel-jest": "^27.1.0", + "deepmerge": "^4.2.2", + "jest": "^27.1.0", + "jest-serializer-vue": "^2.0.2", + "jest-transform-stub": "^2.0.0", + "jest-watch-typeahead": "^1.0.0" + }, + "peerDependencies": { + "@vue/cli-service": "^3.0.0 || ^4.0.0 || ^5.0.0-0", + "@vue/vue2-jest": "^27.0.0-alpha.3", + "@vue/vue3-jest": "^27.0.0-alpha.3", + "jest": "^27.1.0", + "ts-jest": "^27.0.4" + }, + "peerDependenciesMeta": { + "@vue/vue2-jest": { + "optional": true + }, + "@vue/vue3-jest": { + "optional": true + }, + "ts-jest": { + "optional": true + } + } + }, + "node_modules/@vue/cli-plugin-vuex": { + "version": "5.0.8", + "resolved": "https://registry.npmmirror.com/@vue/cli-plugin-vuex/-/cli-plugin-vuex-5.0.8.tgz", + "integrity": "sha512-HSYWPqrunRE5ZZs8kVwiY6oWcn95qf/OQabwLfprhdpFWAGtLStShjsGED2aDpSSeGAskQETrtR/5h7VqgIlBA==", + "dev": true, + "peerDependencies": { + "@vue/cli-service": "^3.0.0 || ^4.0.0 || ^5.0.0-0" + } + }, + "node_modules/@vue/cli-service": { + "version": "5.0.8", + "resolved": "https://registry.npmmirror.com/@vue/cli-service/-/cli-service-5.0.8.tgz", + "integrity": "sha512-nV7tYQLe7YsTtzFrfOMIHc5N2hp5lHG2rpYr0aNja9rNljdgcPZLyQRb2YRivTHqTv7lI962UXFURcpStHgyFw==", + "dev": true, + "dependencies": { + "@babel/helper-compilation-targets": "^7.12.16", + "@soda/friendly-errors-webpack-plugin": "^1.8.0", + "@soda/get-current-script": "^1.0.2", + "@types/minimist": "^1.2.0", + "@vue/cli-overlay": "^5.0.8", + "@vue/cli-plugin-router": "^5.0.8", + "@vue/cli-plugin-vuex": "^5.0.8", + "@vue/cli-shared-utils": "^5.0.8", + "@vue/component-compiler-utils": "^3.3.0", + "@vue/vue-loader-v15": "npm:vue-loader@^15.9.7", + "@vue/web-component-wrapper": "^1.3.0", + "acorn": "^8.0.5", + "acorn-walk": "^8.0.2", + "address": "^1.1.2", + "autoprefixer": "^10.2.4", + "browserslist": "^4.16.3", + "case-sensitive-paths-webpack-plugin": "^2.3.0", + "cli-highlight": "^2.1.10", + "clipboardy": "^2.3.0", + "cliui": "^7.0.4", + "copy-webpack-plugin": "^9.0.1", + "css-loader": "^6.5.0", + "css-minimizer-webpack-plugin": "^3.0.2", + "cssnano": "^5.0.0", + "debug": "^4.1.1", + "default-gateway": "^6.0.3", + "dotenv": "^10.0.0", + "dotenv-expand": "^5.1.0", + "fs-extra": "^9.1.0", + "globby": "^11.0.2", + "hash-sum": "^2.0.0", + "html-webpack-plugin": "^5.1.0", + "is-file-esm": "^1.0.0", + "launch-editor-middleware": "^2.2.1", + "lodash.defaultsdeep": "^4.6.1", + "lodash.mapvalues": "^4.6.0", + "mini-css-extract-plugin": "^2.5.3", + "minimist": "^1.2.5", + "module-alias": "^2.2.2", + "portfinder": "^1.0.26", + "postcss": "^8.2.6", + "postcss-loader": "^6.1.1", + "progress-webpack-plugin": "^1.0.12", + "ssri": "^8.0.1", + "terser-webpack-plugin": "^5.1.1", + "thread-loader": "^3.0.0", + 
"vue-loader": "^17.0.0", + "vue-style-loader": "^4.1.3", + "webpack": "^5.54.0", + "webpack-bundle-analyzer": "^4.4.0", + "webpack-chain": "^6.5.1", + "webpack-dev-server": "^4.7.3", + "webpack-merge": "^5.7.3", + "webpack-virtual-modules": "^0.4.2", + "whatwg-fetch": "^3.6.2" + }, + "bin": { + "vue-cli-service": "bin/vue-cli-service.js" + }, + "engines": { + "node": "^12.0.0 || >= 14.0.0" + }, + "peerDependencies": { + "vue-template-compiler": "^2.0.0", + "webpack-sources": "*" + }, + "peerDependenciesMeta": { + "cache-loader": { + "optional": true + }, + "less-loader": { + "optional": true + }, + "pug-plain-loader": { + "optional": true + }, + "raw-loader": { + "optional": true + }, + "sass-loader": { + "optional": true + }, + "stylus-loader": { + "optional": true + }, + "vue-template-compiler": { + "optional": true + }, + "webpack-sources": { + "optional": true + } + } + }, + "node_modules/@vue/cli-shared-utils": { + "version": "5.0.8", + "resolved": "https://registry.npmmirror.com/@vue/cli-shared-utils/-/cli-shared-utils-5.0.8.tgz", + "integrity": "sha512-uK2YB7bBVuQhjOJF+O52P9yFMXeJVj7ozqJkwYE9PlMHL1LMHjtCYm4cSdOebuPzyP+/9p0BimM/OqxsevIopQ==", + "dependencies": { + "@achrinza/node-ipc": "^9.2.5", + "chalk": "^4.1.2", + "execa": "^1.0.0", + "joi": "^17.4.0", + "launch-editor": "^2.2.1", + "lru-cache": "^6.0.0", + "node-fetch": "^2.6.7", + "open": "^8.0.2", + "ora": "^5.3.0", + "read-pkg": "^5.1.1", + "semver": "^7.3.4", + "strip-ansi": "^6.0.0" + } + }, + "node_modules/@vue/cli-shared-utils/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@vue/cli-shared-utils/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/@vue/cli-shared-utils/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/@vue/cli-shared-utils/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/@vue/cli-shared-utils/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/@vue/cli-shared-utils/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": 
"https://registry.npmmirror.com/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@vue/cli-shared-utils/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@vue/cli-shared-utils/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@vue/cli-shared-utils/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + }, + "node_modules/@vue/cli-ui": { + "version": "5.0.8", + "resolved": "https://registry.npmmirror.com/@vue/cli-ui/-/cli-ui-5.0.8.tgz", + "integrity": "sha512-1eyL1h1T3LVejYplDqERO8TK03sjR3QTOTHa01ABreCdqFTZItiUVud34uEcuoZ6Gi69xdl+LSx6Hvo4t9tfrA==", + "dependencies": { + "@achrinza/node-ipc": "^9.2.5", + "@akryum/winattr": "^3.0.0", + "@graphql-tools/schema": "^8.5.0", + "@vue/cli-shared-utils": "^5.0.8", + "apollo-server-express": "^3.9.0", + "clone": "^2.1.1", + "deepmerge": "^4.2.2", + "express": "^4.17.1", + "express-history-api-fallback": "^2.2.1", + "fkill": "^7.1.0", + "fs-extra": "^9.1.0", + "globby": "^11.0.2", + "graphql": "^15.5.0", + "graphql-subscriptions": "^1.2.0", + "graphql-tag": "^2.10.3", + "graphql-type-json": "^0.3.1", + "javascript-stringify": "^2.0.1", + "js-yaml": "^4.0.0", + "lodash.merge": "^4.6.1", + "lowdb": "^1.0.0", + "lru-cache": "^6.0.0", + "node-notifier": "^10.0.0", + "parse-git-config": "^3.0.0", + "portfinder": "^1.0.26", + "prismjs": "^1.23.0", + "rss-parser": "^3.11.0", + "shortid": "^2.2.15", + "subscriptions-transport-ws": "^0.11.0", + "typescript": "~4.5.5" + }, + "engines": { + "node": "^12.0.0 || >= 14.0.0" + } + }, + "node_modules/@vue/cli-ui-addon-webpack": { + "version": "5.0.8", + "resolved": "https://registry.npmmirror.com/@vue/cli-ui-addon-webpack/-/cli-ui-addon-webpack-5.0.8.tgz", + "integrity": "sha512-sg+3a9vHGzpFRrv7MVZRQ9oDztFN9Mvx0MleidKyPIAWMSOskSQT8zTngy8bEyXjXwNv6mCn2jvUR/tgbldyow==" + }, + "node_modules/@vue/cli-ui-addon-widgets": { + "version": "5.0.8", + "resolved": "https://registry.npmmirror.com/@vue/cli-ui-addon-widgets/-/cli-ui-addon-widgets-5.0.8.tgz", + "integrity": "sha512-jNYQ+3z7HDZ3IR3Z3Dlo3yOPbHexpygkn2IJ7sjA62oGolnNWeF7kvpLwni18l8N5InhS66m9w31an1Fs5pCZA==" + }, + "node_modules/@vue/cli-ui/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmmirror.com/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@vue/cli-ui/node_modules/yallist": { + "version": "4.0.0", + "resolved": 
"https://registry.npmmirror.com/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + }, + "node_modules/@vue/cli/node_modules/@vue/compiler-sfc": { + "version": "2.7.16", + "resolved": "https://registry.npmmirror.com/@vue/compiler-sfc/-/compiler-sfc-2.7.16.tgz", + "integrity": "sha512-KWhJ9k5nXuNtygPU7+t1rX6baZeqOYLEforUPjgNDBnLicfHCoi48H87Q8XyLZOrNNsmhuwKqtpDQWjEFe6Ekg==", + "dependencies": { + "@babel/parser": "^7.23.5", + "postcss": "^8.4.14", + "source-map": "^0.6.1" + }, + "optionalDependencies": { + "prettier": "^1.18.2 || ^2.0.0" + } + }, + "node_modules/@vue/cli/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmmirror.com/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@vue/cli/node_modules/vue": { + "version": "2.7.16", + "resolved": "https://registry.npmmirror.com/vue/-/vue-2.7.16.tgz", + "integrity": "sha512-4gCtFXaAA3zYZdTp5s4Hl2sozuySsgz4jy1EnpBHNfpMa9dK1ZCG7viqBPCwXtmgc8nHqUsAu3G4gtmXkkY3Sw==", + "deprecated": "Vue 2 has reached EOL and is no longer actively maintained. See https://v2.vuejs.org/eol/ for more details.", + "dependencies": { + "@vue/compiler-sfc": "2.7.16", + "csstype": "^3.1.0" + } + }, + "node_modules/@vue/cli/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + }, + "node_modules/@vue/compiler-core": { + "version": "3.4.27", + "resolved": "https://registry.npmmirror.com/@vue/compiler-core/-/compiler-core-3.4.27.tgz", + "integrity": "sha512-E+RyqY24KnyDXsCuQrI+mlcdW3ALND6U7Gqa/+bVwbcpcR3BRRIckFoz7Qyd4TTlnugtwuI7YgjbvsLmxb+yvg==", + "dependencies": { + "@babel/parser": "^7.24.4", + "@vue/shared": "3.4.27", + "entities": "^4.5.0", + "estree-walker": "^2.0.2", + "source-map-js": "^1.2.0" + } + }, + "node_modules/@vue/compiler-core/node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmmirror.com/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/@vue/compiler-dom": { + "version": "3.4.27", + "resolved": "https://registry.npmmirror.com/@vue/compiler-dom/-/compiler-dom-3.4.27.tgz", + "integrity": "sha512-kUTvochG/oVgE1w5ViSr3KUBh9X7CWirebA3bezTbB5ZKBQZwR2Mwj9uoSKRMFcz4gSMzzLXBPD6KpCLb9nvWw==", + "dependencies": { + "@vue/compiler-core": "3.4.27", + "@vue/shared": "3.4.27" + } + }, + "node_modules/@vue/compiler-sfc": { + "version": "3.4.27", + "resolved": "https://registry.npmmirror.com/@vue/compiler-sfc/-/compiler-sfc-3.4.27.tgz", + "integrity": "sha512-nDwntUEADssW8e0rrmE0+OrONwmRlegDA1pD6QhVeXxjIytV03yDqTey9SBDiALsvAd5U4ZrEKbMyVXhX6mCGA==", + "dependencies": { + "@babel/parser": "^7.24.4", + "@vue/compiler-core": "3.4.27", + "@vue/compiler-dom": "3.4.27", + "@vue/compiler-ssr": "3.4.27", + "@vue/shared": "3.4.27", + "estree-walker": "^2.0.2", + "magic-string": "^0.30.10", + "postcss": "^8.4.38", + "source-map-js": "^1.2.0" + } + }, + "node_modules/@vue/compiler-ssr": { + "version": "3.4.27", + "resolved": 
"https://registry.npmmirror.com/@vue/compiler-ssr/-/compiler-ssr-3.4.27.tgz", + "integrity": "sha512-CVRzSJIltzMG5FcidsW0jKNQnNRYC8bT21VegyMMtHmhW3UOI7knmUehzswXLrExDLE6lQCZdrhD4ogI7c+vuw==", + "dependencies": { + "@vue/compiler-dom": "3.4.27", + "@vue/shared": "3.4.27" + } + }, + "node_modules/@vue/component-compiler-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmmirror.com/@vue/component-compiler-utils/-/component-compiler-utils-3.3.0.tgz", + "integrity": "sha512-97sfH2mYNU+2PzGrmK2haqffDpVASuib9/w2/noxiFi31Z54hW+q3izKQXXQZSNhtiUpAI36uSuYepeBe4wpHQ==", + "dev": true, + "dependencies": { + "consolidate": "^0.15.1", + "hash-sum": "^1.0.2", + "lru-cache": "^4.1.2", + "merge-source-map": "^1.1.0", + "postcss": "^7.0.36", + "postcss-selector-parser": "^6.0.2", + "source-map": "~0.6.1", + "vue-template-es2015-compiler": "^1.9.0" + }, + "optionalDependencies": { + "prettier": "^1.18.2 || ^2.0.0" + } + }, + "node_modules/@vue/component-compiler-utils/node_modules/hash-sum": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/hash-sum/-/hash-sum-1.0.2.tgz", + "integrity": "sha512-fUs4B4L+mlt8/XAtSOGMUO1TXmAelItBPtJG7CyHJfYTdDjwisntGO2JQz7oUsatOY9o68+57eziUVNw/mRHmA==", + "dev": true + }, + "node_modules/@vue/component-compiler-utils/node_modules/lru-cache": { + "version": "4.1.5", + "resolved": "https://registry.npmmirror.com/lru-cache/-/lru-cache-4.1.5.tgz", + "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", + "dev": true, + "dependencies": { + "pseudomap": "^1.0.2", + "yallist": "^2.1.2" + } + }, + "node_modules/@vue/component-compiler-utils/node_modules/picocolors": { + "version": "0.2.1", + "resolved": "https://registry.npmmirror.com/picocolors/-/picocolors-0.2.1.tgz", + "integrity": "sha512-cMlDqaLEqfSaW8Z7N5Jw+lyIW869EzT73/F5lhtY9cLGoVxSXznfgfXMO0Z5K0o0Q2TkTXq+0KFsdnSe3jDViA==", + "dev": true + }, + "node_modules/@vue/component-compiler-utils/node_modules/postcss": { + "version": "7.0.39", + "resolved": "https://registry.npmmirror.com/postcss/-/postcss-7.0.39.tgz", + "integrity": "sha512-yioayjNbHn6z1/Bywyb2Y4s3yvDAeXGOyxqD+LnVOinq6Mdmd++SW2wUNVzavyyHxd6+DxzWGIuosg6P1Rj8uA==", + "dev": true, + "dependencies": { + "picocolors": "^0.2.1", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + } + }, + "node_modules/@vue/component-compiler-utils/node_modules/yallist": { + "version": "2.1.2", + "resolved": "https://registry.npmmirror.com/yallist/-/yallist-2.1.2.tgz", + "integrity": "sha512-ncTzHV7NvsQZkYe1DW7cbDLm0YpzHmZF5r/iyP3ZnQtMiJ+pjzisCiMNI+Sj+xQF5pXhSHxSB3uDbsBTzY/c2A==", + "dev": true + }, + "node_modules/@vue/devtools-api": { + "version": "6.6.1", + "resolved": "https://registry.npmmirror.com/@vue/devtools-api/-/devtools-api-6.6.1.tgz", + "integrity": "sha512-LgPscpE3Vs0x96PzSSB4IGVSZXZBZHpfxs+ZA1d+VEPwHdOXowy/Y2CsvCAIFrf+ssVU1pD1jidj505EpUnfbA==" + }, + "node_modules/@vue/eslint-config-typescript": { + "version": "9.1.0", + "resolved": "https://registry.npmmirror.com/@vue/eslint-config-typescript/-/eslint-config-typescript-9.1.0.tgz", + "integrity": "sha512-j/852/ZYQ5wDvCD3HE2q4uqJwJAceer2FwoEch1nFo+zTOsPrbzbE3cuWIs3kvu5hdFsGTMYwRwjI6fqZKDMxQ==", + "dev": true, + "dependencies": { + "vue-eslint-parser": "^8.0.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "@typescript-eslint/eslint-plugin": "^5.0.0", + 
"@typescript-eslint/parser": "^5.0.0", + "eslint": "^6.2.0 || ^7.0.0 || ^8.0.0", + "eslint-plugin-vue": "^8.0.1" + } + }, + "node_modules/@vue/reactivity": { + "version": "3.4.27", + "resolved": "https://registry.npmmirror.com/@vue/reactivity/-/reactivity-3.4.27.tgz", + "integrity": "sha512-kK0g4NknW6JX2yySLpsm2jlunZJl2/RJGZ0H9ddHdfBVHcNzxmQ0sS0b09ipmBoQpY8JM2KmUw+a6sO8Zo+zIA==", + "dependencies": { + "@vue/shared": "3.4.27" + } + }, + "node_modules/@vue/runtime-core": { + "version": "3.4.27", + "resolved": "https://registry.npmmirror.com/@vue/runtime-core/-/runtime-core-3.4.27.tgz", + "integrity": "sha512-7aYA9GEbOOdviqVvcuweTLe5Za4qBZkUY7SvET6vE8kyypxVgaT1ixHLg4urtOlrApdgcdgHoTZCUuTGap/5WA==", + "dependencies": { + "@vue/reactivity": "3.4.27", + "@vue/shared": "3.4.27" + } + }, + "node_modules/@vue/runtime-dom": { + "version": "3.4.27", + "resolved": "https://registry.npmmirror.com/@vue/runtime-dom/-/runtime-dom-3.4.27.tgz", + "integrity": "sha512-ScOmP70/3NPM+TW9hvVAz6VWWtZJqkbdf7w6ySsws+EsqtHvkhxaWLecrTorFxsawelM5Ys9FnDEMt6BPBDS0Q==", + "dependencies": { + "@vue/runtime-core": "3.4.27", + "@vue/shared": "3.4.27", + "csstype": "^3.1.3" + } + }, + "node_modules/@vue/server-renderer": { + "version": "3.4.27", + "resolved": "https://registry.npmmirror.com/@vue/server-renderer/-/server-renderer-3.4.27.tgz", + "integrity": "sha512-dlAMEuvmeA3rJsOMJ2J1kXU7o7pOxgsNHVr9K8hB3ImIkSuBrIdy0vF66h8gf8Tuinf1TK3mPAz2+2sqyf3KzA==", + "dependencies": { + "@vue/compiler-ssr": "3.4.27", + "@vue/shared": "3.4.27" + }, + "peerDependencies": { + "vue": "3.4.27" + } + }, + "node_modules/@vue/shared": { + "version": "3.4.27", + "resolved": "https://registry.npmmirror.com/@vue/shared/-/shared-3.4.27.tgz", + "integrity": "sha512-DL3NmY2OFlqmYYrzp39yi3LDkKxa5vZVwxWdQ3rG0ekuWscHraeIbnI8t+aZK7qhYqEqWKTUdijadunb9pnrgA==" + }, + "node_modules/@vue/test-utils": { + "version": "2.4.6", + "resolved": "https://registry.npmmirror.com/@vue/test-utils/-/test-utils-2.4.6.tgz", + "integrity": "sha512-FMxEjOpYNYiFe0GkaHsnJPXFHxQ6m4t8vI/ElPGpMWxZKpmRvQ33OIrvRXemy6yha03RxhOlQuy+gZMC3CQSow==", + "dev": true, + "dependencies": { + "js-beautify": "^1.14.9", + "vue-component-type-helpers": "^2.0.0" + } + }, + "node_modules/@vue/vue-loader-v15": { + "name": "vue-loader", + "version": "15.11.1", + "resolved": "https://registry.npmmirror.com/vue-loader/-/vue-loader-15.11.1.tgz", + "integrity": "sha512-0iw4VchYLePqJfJu9s62ACWUXeSqM30SQqlIftbYWM3C+jpPcEHKSPUZBLjSF9au4HTHQ/naF6OGnO3Q/qGR3Q==", + "dev": true, + "dependencies": { + "@vue/component-compiler-utils": "^3.1.0", + "hash-sum": "^1.0.2", + "loader-utils": "^1.1.0", + "vue-hot-reload-api": "^2.3.0", + "vue-style-loader": "^4.1.0" + }, + "peerDependencies": { + "css-loader": "*", + "webpack": "^3.0.0 || ^4.1.0 || ^5.0.0-0" + }, + "peerDependenciesMeta": { + "cache-loader": { + "optional": true + }, + "prettier": { + "optional": true + }, + "vue-template-compiler": { + "optional": true + } + } + }, + "node_modules/@vue/vue-loader-v15/node_modules/hash-sum": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/hash-sum/-/hash-sum-1.0.2.tgz", + "integrity": "sha512-fUs4B4L+mlt8/XAtSOGMUO1TXmAelItBPtJG7CyHJfYTdDjwisntGO2JQz7oUsatOY9o68+57eziUVNw/mRHmA==", + "dev": true + }, + "node_modules/@vue/vue3-jest": { + "version": "27.0.0", + "resolved": "https://registry.npmmirror.com/@vue/vue3-jest/-/vue3-jest-27.0.0.tgz", + "integrity": "sha512-VL61CgZBoQqayXfzlZJHHpZuX4lsT8dmdZMJzADhdAJjKu26JBpypHr/2ppevxItljPiuALQW4MKhhCXZRXnLg==", + "dev": true, + "dependencies": { + 
"@babel/plugin-transform-modules-commonjs": "^7.2.0", + "chalk": "^2.1.0", + "convert-source-map": "^1.6.0", + "css-tree": "^2.0.1", + "source-map": "0.5.6", + "tsconfig": "^7.0.0" + }, + "peerDependencies": { + "@babel/core": "7.x", + "babel-jest": "27.x", + "jest": "27.x", + "ts-jest": "27.x", + "typescript": ">= 3.x", + "vue": "^3.0.0-0" + }, + "peerDependenciesMeta": { + "ts-jest": { + "optional": true + }, + "typescript": { + "optional": true + } + } + }, + "node_modules/@vue/vue3-jest/node_modules/convert-source-map": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/convert-source-map/-/convert-source-map-1.9.0.tgz", + "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==", + "dev": true + }, + "node_modules/@vue/vue3-jest/node_modules/source-map": { + "version": "0.5.6", + "resolved": "https://registry.npmmirror.com/source-map/-/source-map-0.5.6.tgz", + "integrity": "sha512-MjZkVp0NHr5+TPihLcadqnlVoGIoWo4IBHptutGh9wI3ttUYvCG26HkSuDi+K6lsZ25syXJXcctwgyVCt//xqA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@vue/web-component-wrapper": { + "version": "1.3.0", + "resolved": "https://registry.npmmirror.com/@vue/web-component-wrapper/-/web-component-wrapper-1.3.0.tgz", + "integrity": "sha512-Iu8Tbg3f+emIIMmI2ycSI8QcEuAUgPTgHwesDU1eKMLE4YC/c/sFbGc70QgMq31ijRftV0R7vCm9co6rldCeOA==", + "dev": true + }, + "node_modules/@vueuse/core": { + "version": "9.13.0", + "resolved": "https://registry.npmmirror.com/@vueuse/core/-/core-9.13.0.tgz", + "integrity": "sha512-pujnclbeHWxxPRqXWmdkKV5OX4Wk4YeK7wusHqRwU0Q7EFusHoqNA/aPhB6KCh9hEqJkLAJo7bb0Lh9b+OIVzw==", + "dependencies": { + "@types/web-bluetooth": "^0.0.16", + "@vueuse/metadata": "9.13.0", + "@vueuse/shared": "9.13.0", + "vue-demi": "*" + } + }, + "node_modules/@vueuse/core/node_modules/vue-demi": { + "version": "0.14.7", + "resolved": "https://registry.npmmirror.com/vue-demi/-/vue-demi-0.14.7.tgz", + "integrity": "sha512-EOG8KXDQNwkJILkx/gPcoL/7vH+hORoBaKgGe+6W7VFMvCYJfmF2dGbvgDroVnI8LU7/kTu8mbjRZGBU1z9NTA==", + "hasInstallScript": true, + "bin": { + "vue-demi-fix": "bin/vue-demi-fix.js", + "vue-demi-switch": "bin/vue-demi-switch.js" + }, + "engines": { + "node": ">=12" + }, + "peerDependencies": { + "@vue/composition-api": "^1.0.0-rc.1", + "vue": "^3.0.0-0 || ^2.6.0" + }, + "peerDependenciesMeta": { + "@vue/composition-api": { + "optional": true + } + } + }, + "node_modules/@vueuse/metadata": { + "version": "9.13.0", + "resolved": "https://registry.npmmirror.com/@vueuse/metadata/-/metadata-9.13.0.tgz", + "integrity": "sha512-gdU7TKNAUVlXXLbaF+ZCfte8BjRJQWPCa2J55+7/h+yDtzw3vOoGQDRXzI6pyKyo6bXFT5/QoPE4hAknExjRLQ==" + }, + "node_modules/@vueuse/shared": { + "version": "9.13.0", + "resolved": "https://registry.npmmirror.com/@vueuse/shared/-/shared-9.13.0.tgz", + "integrity": "sha512-UrnhU+Cnufu4S6JLCPZnkWh0WwZGUp72ktOF2DFptMlOs3TOdVv8xJN53zhHGARmVOsz5KqOls09+J1NR6sBKw==", + "dependencies": { + "vue-demi": "*" + } + }, + "node_modules/@vueuse/shared/node_modules/vue-demi": { + "version": "0.14.7", + "resolved": "https://registry.npmmirror.com/vue-demi/-/vue-demi-0.14.7.tgz", + "integrity": "sha512-EOG8KXDQNwkJILkx/gPcoL/7vH+hORoBaKgGe+6W7VFMvCYJfmF2dGbvgDroVnI8LU7/kTu8mbjRZGBU1z9NTA==", + "hasInstallScript": true, + "bin": { + "vue-demi-fix": "bin/vue-demi-fix.js", + "vue-demi-switch": "bin/vue-demi-switch.js" + }, + "engines": { + "node": ">=12" + }, + "peerDependencies": { + "@vue/composition-api": "^1.0.0-rc.1", + "vue": "^3.0.0-0 || 
^2.6.0" + }, + "peerDependenciesMeta": { + "@vue/composition-api": { + "optional": true + } + } + }, + "node_modules/@webassemblyjs/ast": { + "version": "1.12.1", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/ast/-/ast-1.12.1.tgz", + "integrity": "sha512-EKfMUOPRRUTy5UII4qJDGPpqfwjOmZ5jeGFwid9mnoqIFK+e0vqoi1qH56JpmZSzEL53jKnNzScdmftJyG5xWg==", + "dependencies": { + "@webassemblyjs/helper-numbers": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + } + }, + "node_modules/@webassemblyjs/floating-point-hex-parser": { + "version": "1.11.6", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz", + "integrity": "sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==" + }, + "node_modules/@webassemblyjs/helper-api-error": { + "version": "1.11.6", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz", + "integrity": "sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==" + }, + "node_modules/@webassemblyjs/helper-buffer": { + "version": "1.12.1", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/helper-buffer/-/helper-buffer-1.12.1.tgz", + "integrity": "sha512-nzJwQw99DNDKr9BVCOZcLuJJUlqkJh+kVzVl6Fmq/tI5ZtEyWT1KZMyOXltXLZJmDtvLCDgwsyrkohEtopTXCw==" + }, + "node_modules/@webassemblyjs/helper-code-frame": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/helper-code-frame/-/helper-code-frame-1.9.0.tgz", + "integrity": "sha512-ERCYdJBkD9Vu4vtjUYe8LZruWuNIToYq/ME22igL+2vj2dQ2OOujIZr3MEFvfEaqKoVqpsFKAGsRdBSBjrIvZA==", + "peer": true, + "dependencies": { + "@webassemblyjs/wast-printer": "1.9.0" + } + }, + "node_modules/@webassemblyjs/helper-code-frame/node_modules/@webassemblyjs/ast": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/ast/-/ast-1.9.0.tgz", + "integrity": "sha512-C6wW5L+b7ogSDVqymbkkvuW9kruN//YisMED04xzeBBqjHa2FYnmvOlS6Xj68xWQRgWvI9cIglsjFowH/RJyEA==", + "peer": true, + "dependencies": { + "@webassemblyjs/helper-module-context": "1.9.0", + "@webassemblyjs/helper-wasm-bytecode": "1.9.0", + "@webassemblyjs/wast-parser": "1.9.0" + } + }, + "node_modules/@webassemblyjs/helper-code-frame/node_modules/@webassemblyjs/helper-wasm-bytecode": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.9.0.tgz", + "integrity": "sha512-R7FStIzyNcd7xKxCZH5lE0Bqy+hGTwS3LJjuv1ZVxd9O7eHCedSdrId/hMOd20I+v8wDXEn+bjfKDLzTepoaUw==", + "peer": true + }, + "node_modules/@webassemblyjs/helper-code-frame/node_modules/@webassemblyjs/wast-printer": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/wast-printer/-/wast-printer-1.9.0.tgz", + "integrity": "sha512-2J0nE95rHXHyQ24cWjMKJ1tqB/ds8z/cyeOZxJhcb+rW+SQASVjuznUSmdz5GpVJTzU8JkhYut0D3siFDD6wsA==", + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.9.0", + "@webassemblyjs/wast-parser": "1.9.0", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/helper-fsm": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/helper-fsm/-/helper-fsm-1.9.0.tgz", + "integrity": "sha512-OPRowhGbshCb5PxJ8LocpdX9Kl0uB4XsAjl6jH/dWKlk/mzsANvhwbiULsaiqT5GZGT9qinTICdj6PLuM5gslw==", + "peer": true + }, + "node_modules/@webassemblyjs/helper-module-context": { + "version": "1.9.0", + "resolved": 
"https://registry.npmmirror.com/@webassemblyjs/helper-module-context/-/helper-module-context-1.9.0.tgz", + "integrity": "sha512-MJCW8iGC08tMk2enck1aPW+BE5Cw8/7ph/VGZxwyvGbJwjktKkDK7vy7gAmMDx88D7mhDTCNKAW5tED+gZ0W8g==", + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.9.0" + } + }, + "node_modules/@webassemblyjs/helper-module-context/node_modules/@webassemblyjs/ast": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/ast/-/ast-1.9.0.tgz", + "integrity": "sha512-C6wW5L+b7ogSDVqymbkkvuW9kruN//YisMED04xzeBBqjHa2FYnmvOlS6Xj68xWQRgWvI9cIglsjFowH/RJyEA==", + "peer": true, + "dependencies": { + "@webassemblyjs/helper-module-context": "1.9.0", + "@webassemblyjs/helper-wasm-bytecode": "1.9.0", + "@webassemblyjs/wast-parser": "1.9.0" + } + }, + "node_modules/@webassemblyjs/helper-module-context/node_modules/@webassemblyjs/helper-wasm-bytecode": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.9.0.tgz", + "integrity": "sha512-R7FStIzyNcd7xKxCZH5lE0Bqy+hGTwS3LJjuv1ZVxd9O7eHCedSdrId/hMOd20I+v8wDXEn+bjfKDLzTepoaUw==", + "peer": true + }, + "node_modules/@webassemblyjs/helper-numbers": { + "version": "1.11.6", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz", + "integrity": "sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==", + "dependencies": { + "@webassemblyjs/floating-point-hex-parser": "1.11.6", + "@webassemblyjs/helper-api-error": "1.11.6", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/helper-wasm-bytecode": { + "version": "1.11.6", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz", + "integrity": "sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==" + }, + "node_modules/@webassemblyjs/helper-wasm-section": { + "version": "1.12.1", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.12.1.tgz", + "integrity": "sha512-Jif4vfB6FJlUlSbgEMHUyk1j234GTNG9dBJ4XJdOySoj518Xj0oGsNi59cUQF4RRMS9ouBUxDDdyBVfPTypa5g==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-buffer": "1.12.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/wasm-gen": "1.12.1" + } + }, + "node_modules/@webassemblyjs/ieee754": { + "version": "1.11.6", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz", + "integrity": "sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==", + "dependencies": { + "@xtuc/ieee754": "^1.2.0" + } + }, + "node_modules/@webassemblyjs/leb128": { + "version": "1.11.6", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/leb128/-/leb128-1.11.6.tgz", + "integrity": "sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==", + "dependencies": { + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/utf8": { + "version": "1.11.6", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/utf8/-/utf8-1.11.6.tgz", + "integrity": "sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==" + }, + "node_modules/@webassemblyjs/wasm-edit": { + "version": "1.12.1", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/wasm-edit/-/wasm-edit-1.12.1.tgz", + "integrity": 
"sha512-1DuwbVvADvS5mGnXbE+c9NfA8QRcZ6iKquqjjmR10k6o+zzsRVesil54DKexiowcFCPdr/Q0qaMgB01+SQ1u6g==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-buffer": "1.12.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/helper-wasm-section": "1.12.1", + "@webassemblyjs/wasm-gen": "1.12.1", + "@webassemblyjs/wasm-opt": "1.12.1", + "@webassemblyjs/wasm-parser": "1.12.1", + "@webassemblyjs/wast-printer": "1.12.1" + } + }, + "node_modules/@webassemblyjs/wasm-gen": { + "version": "1.12.1", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/wasm-gen/-/wasm-gen-1.12.1.tgz", + "integrity": "sha512-TDq4Ojh9fcohAw6OIMXqiIcTq5KUXTGRkVxbSo1hQnSy6lAM5GSdfwWeSxpAo0YzgsgF182E/U0mDNhuA0tW7w==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-opt": { + "version": "1.12.1", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/wasm-opt/-/wasm-opt-1.12.1.tgz", + "integrity": "sha512-Jg99j/2gG2iaz3hijw857AVYekZe2SAskcqlWIZXjji5WStnOpVoat3gQfT/Q5tb2djnCjBtMocY/Su1GfxPBg==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-buffer": "1.12.1", + "@webassemblyjs/wasm-gen": "1.12.1", + "@webassemblyjs/wasm-parser": "1.12.1" + } + }, + "node_modules/@webassemblyjs/wasm-parser": { + "version": "1.12.1", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/wasm-parser/-/wasm-parser-1.12.1.tgz", + "integrity": "sha512-xikIi7c2FHXysxXe3COrVUPSheuBtpcfhbpFj4gmu7KRLYOzANztwUU0IbsqvMqzuNK2+glRGWCEqZo1WCLyAQ==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-api-error": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wast-parser": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/wast-parser/-/wast-parser-1.9.0.tgz", + "integrity": "sha512-qsqSAP3QQ3LyZjNC/0jBJ/ToSxfYJ8kYyuiGvtn/8MK89VrNEfwj7BPQzJVHi0jGTRK2dGdJ5PRqhtjzoww+bw==", + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.9.0", + "@webassemblyjs/floating-point-hex-parser": "1.9.0", + "@webassemblyjs/helper-api-error": "1.9.0", + "@webassemblyjs/helper-code-frame": "1.9.0", + "@webassemblyjs/helper-fsm": "1.9.0", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/wast-parser/node_modules/@webassemblyjs/ast": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/ast/-/ast-1.9.0.tgz", + "integrity": "sha512-C6wW5L+b7ogSDVqymbkkvuW9kruN//YisMED04xzeBBqjHa2FYnmvOlS6Xj68xWQRgWvI9cIglsjFowH/RJyEA==", + "peer": true, + "dependencies": { + "@webassemblyjs/helper-module-context": "1.9.0", + "@webassemblyjs/helper-wasm-bytecode": "1.9.0", + "@webassemblyjs/wast-parser": "1.9.0" + } + }, + "node_modules/@webassemblyjs/wast-parser/node_modules/@webassemblyjs/floating-point-hex-parser": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.9.0.tgz", + "integrity": "sha512-TG5qcFsS8QB4g4MhrxK5TqfdNe7Ey/7YL/xN+36rRjl/BlGE/NcBvJcqsRgCP6Z92mRE+7N50pRIi8SmKUbcQA==", + "peer": true + }, + "node_modules/@webassemblyjs/wast-parser/node_modules/@webassemblyjs/helper-api-error": { + "version": "1.9.0", + "resolved": 
"https://registry.npmmirror.com/@webassemblyjs/helper-api-error/-/helper-api-error-1.9.0.tgz", + "integrity": "sha512-NcMLjoFMXpsASZFxJ5h2HZRcEhDkvnNFOAKneP5RbKRzaWJN36NC4jqQHKwStIhGXu5mUWlUUk7ygdtrO8lbmw==", + "peer": true + }, + "node_modules/@webassemblyjs/wast-parser/node_modules/@webassemblyjs/helper-wasm-bytecode": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.9.0.tgz", + "integrity": "sha512-R7FStIzyNcd7xKxCZH5lE0Bqy+hGTwS3LJjuv1ZVxd9O7eHCedSdrId/hMOd20I+v8wDXEn+bjfKDLzTepoaUw==", + "peer": true + }, + "node_modules/@webassemblyjs/wast-printer": { + "version": "1.12.1", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/wast-printer/-/wast-printer-1.12.1.tgz", + "integrity": "sha512-+X4WAlOisVWQMikjbcvY2e0rwPsKQ9F688lksZhBcPycBBuii3O7m8FACbDMWDojpAqvjIncrG8J0XHKyQfVeA==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webpack-cli/configtest": { + "version": "2.1.1", + "resolved": "https://registry.npmmirror.com/@webpack-cli/configtest/-/configtest-2.1.1.tgz", + "integrity": "sha512-wy0mglZpDSiSS0XHrVR+BAdId2+yxPSoJW8fsna3ZpYSlufjvxnP4YbKTCBZnNIcGN4r6ZPXV55X4mYExOfLmw==", + "engines": { + "node": ">=14.15.0" + }, + "peerDependencies": { + "webpack": "5.x.x", + "webpack-cli": "5.x.x" + } + }, + "node_modules/@webpack-cli/info": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/@webpack-cli/info/-/info-2.0.2.tgz", + "integrity": "sha512-zLHQdI/Qs1UyT5UBdWNqsARasIA+AaF8t+4u2aS2nEpBQh2mWIVb8qAklq0eUENnC5mOItrIB4LiS9xMtph18A==", + "engines": { + "node": ">=14.15.0" + }, + "peerDependencies": { + "webpack": "5.x.x", + "webpack-cli": "5.x.x" + } + }, + "node_modules/@webpack-cli/serve": { + "version": "2.0.5", + "resolved": "https://registry.npmmirror.com/@webpack-cli/serve/-/serve-2.0.5.tgz", + "integrity": "sha512-lqaoKnRYBdo1UgDX8uF24AfGMifWK19TxPmM5FHc2vAGxrJ/qtyUyFBWoY1tISZdelsQ5fBcOusifo5o5wSJxQ==", + "engines": { + "node": ">=14.15.0" + }, + "peerDependencies": { + "webpack": "5.x.x", + "webpack-cli": "5.x.x" + }, + "peerDependenciesMeta": { + "webpack-dev-server": { + "optional": true + } + } + }, + "node_modules/@xtuc/ieee754": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/@xtuc/ieee754/-/ieee754-1.2.0.tgz", + "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==" + }, + "node_modules/@xtuc/long": { + "version": "4.2.2", + "resolved": "https://registry.npmmirror.com/@xtuc/long/-/long-4.2.2.tgz", + "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==" + }, + "node_modules/@yr/monotone-cubic-spline": { + "version": "1.0.3", + "resolved": "https://registry.npmmirror.com/@yr/monotone-cubic-spline/-/monotone-cubic-spline-1.0.3.tgz", + "integrity": "sha512-FQXkOta0XBSUPHndIKON2Y9JeQz5ZeMqLYZVVK93FliNBFm7LNMIZmY6FrMEB9XPcDbE2bekMbZD6kzDkxwYjA==" + }, + "node_modules/abab": { + "version": "2.0.6", + "resolved": "https://registry.npmmirror.com/abab/-/abab-2.0.6.tgz", + "integrity": "sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA==", + "deprecated": "Use your platform's native atob() and btoa() methods instead", + "dev": true + }, + "node_modules/abbrev": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/abbrev/-/abbrev-2.0.0.tgz", + "integrity": 
"sha512-6/mh1E2u2YgEsCHdY0Yx5oW+61gZU+1vXaoiHHrpKeuRNNgFvS+/jrwHiQhB5apAf5oB7UB7E19ol2R2LKH8hQ==", + "dev": true, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmmirror.com/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/acorn": { + "version": "8.11.3", + "resolved": "https://registry.npmmirror.com/acorn/-/acorn-8.11.3.tgz", + "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-globals": { + "version": "6.0.0", + "resolved": "https://registry.npmmirror.com/acorn-globals/-/acorn-globals-6.0.0.tgz", + "integrity": "sha512-ZQl7LOWaF5ePqqcX4hLuv/bLXYQNfNWw2c0/yX/TsPRKamzHcTGQnlCjHT3TsmkOUVEPS3crCxiPfdzE/Trlhg==", + "dev": true, + "dependencies": { + "acorn": "^7.1.1", + "acorn-walk": "^7.1.1" + } + }, + "node_modules/acorn-globals/node_modules/acorn": { + "version": "7.4.1", + "resolved": "https://registry.npmmirror.com/acorn/-/acorn-7.4.1.tgz", + "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==", + "dev": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-globals/node_modules/acorn-walk": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/acorn-walk/-/acorn-walk-7.2.0.tgz", + "integrity": "sha512-OPdCF6GsMIP+Az+aWfAAOEt2/+iVDKE7oy6lJ098aoe59oAmK76qV6Gw60SbZ8jHuG2wH058GF4pLFbYamYrVA==", + "dev": true, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-import-assertions": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/acorn-import-assertions/-/acorn-import-assertions-1.9.0.tgz", + "integrity": "sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==", + "peerDependencies": { + "acorn": "^8" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmmirror.com/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.2", + "resolved": "https://registry.npmmirror.com/acorn-walk/-/acorn-walk-8.3.2.tgz", + "integrity": "sha512-cjkyv4OtNCIeqhHrfS81QWXoCBPExR/J62oyEqepVw8WaQeSqpW2uhuLPh1m9eWhDuOo/jUXVTlifvesOWp/4A==", + "dev": true, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/address": { + "version": "1.2.2", + "resolved": "https://registry.npmmirror.com/address/-/address-1.2.2.tgz", + "integrity": "sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA==", + "dev": true, + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/agent-base": { + "version": "6.0.2", + "resolved": "https://registry.npmmirror.com/agent-base/-/agent-base-6.0.2.tgz", + "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "devOptional": true, + "dependencies": { + "debug": "4" + }, + "engines": { + "node": ">= 6.0.0" + } + }, + 
"node_modules/aggregate-error": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "dependencies": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmmirror.com/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-errors": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/ajv-errors/-/ajv-errors-1.0.1.tgz", + "integrity": "sha512-DCRfO/4nQ+89p/RK43i8Ezd41EqdGIU4ld7nGF8OQ14oc/we5rEntLCUa7+jrn3nn83BosfwZA0wb4pon2o8iQ==", + "peer": true, + "peerDependencies": { + "ajv": ">=5.0.0" + } + }, + "node_modules/ajv-formats": { + "version": "2.1.1", + "resolved": "https://registry.npmmirror.com/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "dev": true, + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/ajv-formats/node_modules/ajv": { + "version": "8.13.0", + "resolved": "https://registry.npmmirror.com/ajv/-/ajv-8.13.0.tgz", + "integrity": "sha512-PRA911Blj99jR5RMeTunVbNXMF6Lp4vZXnk5GQjcnUWUTsrXtekg/pnmFFI2u/I36Y/2bITGS30GZCXei6uNkA==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.3", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.4.1" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-formats/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "dev": true + }, + "node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmmirror.com/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/ansi-align": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/ansi-align/-/ansi-align-3.0.1.tgz", + "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", + "dependencies": { + "string-width": "^4.1.0" + } + }, + "node_modules/ansi-colors": { + "version": "4.1.3", + "resolved": "https://registry.npmmirror.com/ansi-colors/-/ansi-colors-4.1.3.tgz", + "integrity": "sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmmirror.com/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": 
"sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-escapes/node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmmirror.com/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-html-community": { + "version": "0.0.8", + "resolved": "https://registry.npmmirror.com/ansi-html-community/-/ansi-html-community-0.0.8.tgz", + "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==", + "dev": true, + "engines": [ + "node >= 0.8.0" + ], + "bin": { + "ansi-html": "bin/ansi-html" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmmirror.com/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/ant-design-vue": { + "version": "4.2.1", + "resolved": "https://registry.npmmirror.com/ant-design-vue/-/ant-design-vue-4.2.1.tgz", + "integrity": "sha512-3u6fmfCEJ5AFTsYhogP8lJ/vcqiAJO16o+gGQkWYRGLl0NxmY4hje4cPyv+pcxpeJgcG0vNEmkb1vVHKcnxd+g==", + "dependencies": { + "@ant-design/colors": "^6.0.0", + "@ant-design/icons-vue": "^7.0.0", + "@babel/runtime": "^7.10.5", + "@ctrl/tinycolor": "^3.5.0", + "@emotion/hash": "^0.9.0", + "@emotion/unitless": "^0.8.0", + "@simonwep/pickr": "~1.8.0", + "array-tree-filter": "^2.1.0", + "async-validator": "^4.0.0", + "csstype": "^3.1.1", + "dayjs": "^1.10.5", + "dom-align": "^1.12.1", + "dom-scroll-into-view": "^2.0.0", + "lodash": "^4.17.21", + "lodash-es": "^4.17.15", + "resize-observer-polyfill": "^1.5.1", + "scroll-into-view-if-needed": "^2.2.25", + "shallow-equal": "^1.0.0", + "stylis": "^4.1.3", + "throttle-debounce": "^5.0.0", + "vue-types": "^3.0.0", + "warning": "^4.0.0" + }, + "engines": { + "node": ">=12.22.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/ant-design-vue" + }, + "peerDependencies": { + "vue": ">=3.2.0" + } + }, + "node_modules/any-promise": { + "version": "1.3.0", + "resolved": "https://registry.npmmirror.com/any-promise/-/any-promise-1.3.0.tgz", + "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", + "dev": true + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmmirror.com/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "devOptional": true, + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/apexcharts": { + "version": "3.49.1", + "resolved": 
"https://registry.npmmirror.com/apexcharts/-/apexcharts-3.49.1.tgz", + "integrity": "sha512-MqGtlq/KQuO8j0BBsUJYlRG8VBctKwYdwuBtajHgHTmSgUU3Oai+8oYN/rKCXwXzrUlYA+GiMgotAIbXY2BCGw==", + "dependencies": { + "@yr/monotone-cubic-spline": "^1.0.3", + "svg.draggable.js": "^2.2.2", + "svg.easing.js": "^2.0.0", + "svg.filter.js": "^2.0.2", + "svg.pathmorphing.js": "^0.1.3", + "svg.resize.js": "^1.4.3", + "svg.select.js": "^3.0.1" + } + }, + "node_modules/apollo-datasource": { + "version": "3.3.2", + "resolved": "https://registry.npmmirror.com/apollo-datasource/-/apollo-datasource-3.3.2.tgz", + "integrity": "sha512-L5TiS8E2Hn/Yz7SSnWIVbZw0ZfEIXZCa5VUiVxD9P53JvSrf4aStvsFDlGWPvpIdCR+aly2CfoB79B9/JjKFqg==", + "deprecated": "The `apollo-datasource` package is part of Apollo Server v2 and v3, which are now deprecated (end-of-life October 22nd 2023 and October 22nd 2024, respectively). See https://www.apollographql.com/docs/apollo-server/previous-versions/ for more details.", + "dependencies": { + "@apollo/utils.keyvaluecache": "^1.0.1", + "apollo-server-env": "^4.2.1" + }, + "engines": { + "node": ">=12.0" + } + }, + "node_modules/apollo-reporting-protobuf": { + "version": "3.4.0", + "resolved": "https://registry.npmmirror.com/apollo-reporting-protobuf/-/apollo-reporting-protobuf-3.4.0.tgz", + "integrity": "sha512-h0u3EbC/9RpihWOmcSsvTW2O6RXVaD/mPEjfrPkxRPTEPWqncsgOoRJw+wih4OqfH3PvTJvoEIf4LwKrUaqWog==", + "deprecated": "The `apollo-reporting-protobuf` package is part of Apollo Server v2 and v3, which are now deprecated (end-of-life October 22nd 2023 and October 22nd 2024, respectively). This package's functionality is now found in the `@apollo/usage-reporting-protobuf` package. See https://www.apollographql.com/docs/apollo-server/previous-versions/ for more details.", + "dependencies": { + "@apollo/protobufjs": "1.2.6" + } + }, + "node_modules/apollo-reporting-protobuf/node_modules/@apollo/protobufjs": { + "version": "1.2.6", + "resolved": "https://registry.npmmirror.com/@apollo/protobufjs/-/protobufjs-1.2.6.tgz", + "integrity": "sha512-Wqo1oSHNUj/jxmsVp4iR3I480p6qdqHikn38lKrFhfzcDJ7lwd7Ck7cHRl4JE81tWNArl77xhnG/OkZhxKBYOw==", + "hasInstallScript": true, + "dependencies": { + "@protobufjs/aspromise": "^1.1.2", + "@protobufjs/base64": "^1.1.2", + "@protobufjs/codegen": "^2.0.4", + "@protobufjs/eventemitter": "^1.1.0", + "@protobufjs/fetch": "^1.1.0", + "@protobufjs/float": "^1.0.2", + "@protobufjs/inquire": "^1.1.0", + "@protobufjs/path": "^1.1.2", + "@protobufjs/pool": "^1.1.0", + "@protobufjs/utf8": "^1.1.0", + "@types/long": "^4.0.0", + "@types/node": "^10.1.0", + "long": "^4.0.0" + }, + "bin": { + "apollo-pbjs": "bin/pbjs", + "apollo-pbts": "bin/pbts" + } + }, + "node_modules/apollo-reporting-protobuf/node_modules/@types/node": { + "version": "10.17.60", + "resolved": "https://registry.npmmirror.com/@types/node/-/node-10.17.60.tgz", + "integrity": "sha512-F0KIgDJfy2nA3zMLmWGKxcH2ZVEtCZXHHdOQs2gSaQ27+lNeEfGxzkIw90aXswATX7AZ33tahPbzy6KAfUreVw==" + }, + "node_modules/apollo-server-core": { + "version": "3.13.0", + "resolved": "https://registry.npmmirror.com/apollo-server-core/-/apollo-server-core-3.13.0.tgz", + "integrity": "sha512-v/g6DR6KuHn9DYSdtQijz8dLOkP78I5JSVJzPkARhDbhpH74QNwrQ2PP2URAPPEDJ2EeZNQDX8PvbYkAKqg+kg==", + "dependencies": { + "@apollo/utils.keyvaluecache": "^1.0.1", + "@apollo/utils.logger": "^1.0.0", + "@apollo/utils.usagereporting": "^1.0.0", + "@apollographql/apollo-tools": "^0.5.3", + "@apollographql/graphql-playground-html": "1.6.29", + "@graphql-tools/mock": "^8.1.2", + 
"@graphql-tools/schema": "^8.0.0", + "@josephg/resolvable": "^1.0.0", + "apollo-datasource": "^3.3.2", + "apollo-reporting-protobuf": "^3.4.0", + "apollo-server-env": "^4.2.1", + "apollo-server-errors": "^3.3.1", + "apollo-server-plugin-base": "^3.7.2", + "apollo-server-types": "^3.8.0", + "async-retry": "^1.2.1", + "fast-json-stable-stringify": "^2.1.0", + "graphql-tag": "^2.11.0", + "loglevel": "^1.6.8", + "lru-cache": "^6.0.0", + "node-abort-controller": "^3.0.1", + "sha.js": "^2.4.11", + "uuid": "^9.0.0", + "whatwg-mimetype": "^3.0.0" + }, + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "graphql": "^15.3.0 || ^16.0.0" + } + }, + "node_modules/apollo-server-core/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmmirror.com/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/apollo-server-core/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + }, + "node_modules/apollo-server-env": { + "version": "4.2.1", + "resolved": "https://registry.npmmirror.com/apollo-server-env/-/apollo-server-env-4.2.1.tgz", + "integrity": "sha512-vm/7c7ld+zFMxibzqZ7SSa5tBENc4B0uye9LTfjJwGoQFY5xsUPH5FpO5j0bMUDZ8YYNbrF9SNtzc5Cngcr90g==", + "deprecated": "The `apollo-server-env` package is part of Apollo Server v2 and v3, which are now deprecated (end-of-life October 22nd 2023 and October 22nd 2024, respectively). This package's functionality is now found in the `@apollo/utils.fetcher` package. See https://www.apollographql.com/docs/apollo-server/previous-versions/ for more details.", + "dependencies": { + "node-fetch": "^2.6.7" + }, + "engines": { + "node": ">=12.0" + } + }, + "node_modules/apollo-server-errors": { + "version": "3.3.1", + "resolved": "https://registry.npmmirror.com/apollo-server-errors/-/apollo-server-errors-3.3.1.tgz", + "integrity": "sha512-xnZJ5QWs6FixHICXHxUfm+ZWqqxrNuPlQ+kj5m6RtEgIpekOPssH/SD9gf2B4HuWV0QozorrygwZnux8POvyPA==", + "deprecated": "The `apollo-server-errors` package is part of Apollo Server v2 and v3, which are now deprecated (end-of-life October 22nd 2023 and October 22nd 2024, respectively). This package's functionality is now found in the `@apollo/server` package. 
See https://www.apollographql.com/docs/apollo-server/previous-versions/ for more details.", + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "graphql": "^15.3.0 || ^16.0.0" + } + }, + "node_modules/apollo-server-express": { + "version": "3.13.0", + "resolved": "https://registry.npmmirror.com/apollo-server-express/-/apollo-server-express-3.13.0.tgz", + "integrity": "sha512-iSxICNbDUyebOuM8EKb3xOrpIwOQgKxGbR2diSr4HP3IW8T3njKFOoMce50vr+moOCe1ev8BnLcw9SNbuUtf7g==", + "dependencies": { + "@types/accepts": "^1.3.5", + "@types/body-parser": "1.19.2", + "@types/cors": "2.8.12", + "@types/express": "4.17.14", + "@types/express-serve-static-core": "4.17.31", + "accepts": "^1.3.5", + "apollo-server-core": "^3.13.0", + "apollo-server-types": "^3.8.0", + "body-parser": "^1.19.0", + "cors": "^2.8.5", + "parseurl": "^1.3.3" + }, + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "express": "^4.17.1", + "graphql": "^15.3.0 || ^16.0.0" + } + }, + "node_modules/apollo-server-plugin-base": { + "version": "3.7.2", + "resolved": "https://registry.npmmirror.com/apollo-server-plugin-base/-/apollo-server-plugin-base-3.7.2.tgz", + "integrity": "sha512-wE8dwGDvBOGehSsPTRZ8P/33Jan6/PmL0y0aN/1Z5a5GcbFhDaaJCjK5cav6npbbGL2DPKK0r6MPXi3k3N45aw==", + "deprecated": "The `apollo-server-plugin-base` package is part of Apollo Server v2 and v3, which are now deprecated (end-of-life October 22nd 2023 and October 22nd 2024, respectively). This package's functionality is now found in the `@apollo/server` package. See https://www.apollographql.com/docs/apollo-server/previous-versions/ for more details.", + "dependencies": { + "apollo-server-types": "^3.8.0" + }, + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "graphql": "^15.3.0 || ^16.0.0" + } + }, + "node_modules/apollo-server-types": { + "version": "3.8.0", + "resolved": "https://registry.npmmirror.com/apollo-server-types/-/apollo-server-types-3.8.0.tgz", + "integrity": "sha512-ZI/8rTE4ww8BHktsVpb91Sdq7Cb71rdSkXELSwdSR0eXu600/sY+1UXhTWdiJvk+Eq5ljqoHLwLbY2+Clq2b9A==", + "deprecated": "The `apollo-server-types` package is part of Apollo Server v2 and v3, which are now deprecated (end-of-life October 22nd 2023 and October 22nd 2024, respectively). This package's functionality is now found in the `@apollo/server` package. 
See https://www.apollographql.com/docs/apollo-server/previous-versions/ for more details.", + "dependencies": { + "@apollo/utils.keyvaluecache": "^1.0.1", + "@apollo/utils.logger": "^1.0.0", + "apollo-reporting-protobuf": "^3.4.0", + "apollo-server-env": "^4.2.1" + }, + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "graphql": "^15.3.0 || ^16.0.0" + } + }, + "node_modules/aproba": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/aproba/-/aproba-1.2.0.tgz", + "integrity": "sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw==" + }, + "node_modules/arch": { + "version": "2.2.0", + "resolved": "https://registry.npmmirror.com/arch/-/arch-2.2.0.tgz", + "integrity": "sha512-Of/R0wqp83cgHozfIYLbBMnej79U/SVGOOyuB3VVFv1NRM/PSFMK12x9KVtiYzJqmnU5WR2qp0Z5rHb7sWGnFQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/archive-type": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/archive-type/-/archive-type-4.0.0.tgz", + "integrity": "sha512-zV4Ky0v1F8dBrdYElwTvQhweQ0P7Kwc1aluqJsYtOBP01jXcWCyW2IEfI1YiqsG+Iy7ZR+o5LF1N+PGECBxHWA==", + "dependencies": { + "file-type": "^4.2.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/archive-type/node_modules/file-type": { + "version": "4.4.0", + "resolved": "https://registry.npmmirror.com/file-type/-/file-type-4.4.0.tgz", + "integrity": "sha512-f2UbFQEk7LXgWpi5ntcO86OeA/cC80fuDDDaX/fZ2ZGel+AF7leRQqBBW1eJNiiQkrZlAoM6P+VYP5P6bOlDEQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/are-we-there-yet": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/are-we-there-yet/-/are-we-there-yet-2.0.0.tgz", + "integrity": "sha512-Ci/qENmwHnsYo9xKIcUJN5LeDKdJ6R1Z1j9V/J5wyq8nh/mYPEpIKJbBZXtZjG04HiK7zV/p6Vs9952MrMeUIw==", + "optional": true, + "dependencies": { + "delegates": "^1.0.0", + "readable-stream": "^3.6.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/are-we-there-yet/node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmmirror.com/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "optional": true, + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" + }, + "node_modules/arr-diff": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/arr-diff/-/arr-diff-4.0.0.tgz", + "integrity": "sha512-YVIQ82gZPGBebQV/a8dar4AitzCQs0jjXwMPZllpXMaGjXPYVUawSxQrRsjhjupyVxEvbHgUmIhKVlND+j02kA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/arr-flatten": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/arr-flatten/-/arr-flatten-1.1.0.tgz", + "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/arr-union": { + "version": "3.1.0", + "resolved": 
"https://registry.npmmirror.com/arr-union/-/arr-union-3.1.0.tgz", + "integrity": "sha512-sKpyeERZ02v1FeCZT8lrfJq5u6goHCtpTAzPwJYe7c8SPFOboNjNg1vz2L4VTn9T4PQxEx13TbXLmYUcS6Ug7Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/array-flatten": { + "version": "1.1.1", + "resolved": "https://registry.npmmirror.com/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==" + }, + "node_modules/array-tree-filter": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/array-tree-filter/-/array-tree-filter-2.1.0.tgz", + "integrity": "sha512-4ROwICNlNw/Hqa9v+rk5h22KjmzB1JGTMVKP2AKJBOCgb0yL0ASf0+YvCcLNNwquOHNX48jkeZIJ3a+oOQqKcw==" + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/array-unique": { + "version": "0.3.2", + "resolved": "https://registry.npmmirror.com/array-unique/-/array-unique-0.3.2.tgz", + "integrity": "sha512-SleRWjh9JUud2wH1hPs9rZBZ33H6T9HOiL0uwGnGx9FpE6wKGyfWugmbkEOIs6qWrZhg0LWeLziLrEwQJhs5mQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/arrify": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/arrify/-/arrify-2.0.1.tgz", + "integrity": "sha512-3duEwti880xqi4eAMN8AyR4a0ByT90zoYdLlevfrvU43vb0YZwZVfxOgxWrLXXXpyugL0hNZc9G6BiB5B3nUug==", + "engines": { + "node": ">=8" + } + }, + "node_modules/asn1.js": { + "version": "4.10.1", + "resolved": "https://registry.npmmirror.com/asn1.js/-/asn1.js-4.10.1.tgz", + "integrity": "sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw==", + "peer": true, + "dependencies": { + "bn.js": "^4.0.0", + "inherits": "^2.0.1", + "minimalistic-assert": "^1.0.0" + } + }, + "node_modules/asn1.js/node_modules/bn.js": { + "version": "4.12.0", + "resolved": "https://registry.npmmirror.com/bn.js/-/bn.js-4.12.0.tgz", + "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==", + "peer": true + }, + "node_modules/assert": { + "version": "1.5.1", + "resolved": "https://registry.npmmirror.com/assert/-/assert-1.5.1.tgz", + "integrity": "sha512-zzw1uCAgLbsKwBfFc8CX78DDg+xZeBksSO3vwVIDDN5i94eOrPsSSyiVhmsSABFDM/OcpE2aagCat9dnWQLG1A==", + "peer": true, + "dependencies": { + "object.assign": "^4.1.4", + "util": "^0.10.4" + } + }, + "node_modules/assert/node_modules/inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmmirror.com/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==", + "peer": true + }, + "node_modules/assert/node_modules/util": { + "version": "0.10.4", + "resolved": "https://registry.npmmirror.com/util/-/util-0.10.4.tgz", + "integrity": "sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A==", + "peer": true, + "dependencies": { + "inherits": "2.0.3" + } + }, + "node_modules/assign-symbols": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/assign-symbols/-/assign-symbols-1.0.0.tgz", + "integrity": "sha512-Q+JC7Whu8HhmTdBph/Tq59IoRtoy6KAm5zzPv00WdujX82lbAL8K7WVjne7vdCsAmbF4AYaDOPyO3k0kl8qIrw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ast-types": { + 
"version": "0.14.2", + "resolved": "https://registry.npmmirror.com/ast-types/-/ast-types-0.14.2.tgz", + "integrity": "sha512-O0yuUDnZeQDL+ncNGlJ78BiO4jnYI3bvMsD5prT0/nsgijG/LpNBIr63gTjVTNsiGkgQhiyCShTgxt8oXOrklA==", + "dependencies": { + "tslib": "^2.0.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/astral-regex": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/astral-regex/-/astral-regex-2.0.0.tgz", + "integrity": "sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/async": { + "version": "3.2.5", + "resolved": "https://registry.npmmirror.com/async/-/async-3.2.5.tgz", + "integrity": "sha512-baNZyqaaLhyLVKm/DlvdW051MSgO6b8eVfIezl9E5PqWxFgzLm/wQntEW4zOytVburDEr0JlALEpdOFwvErLsg==" + }, + "node_modules/async-each": { + "version": "1.0.6", + "resolved": "https://registry.npmmirror.com/async-each/-/async-each-1.0.6.tgz", + "integrity": "sha512-c646jH1avxr+aVpndVMeAfYw7wAa6idufrlN3LPA4PmKS0QEGp6PIC9nwz0WQkkvBGAMEki3pFdtxaF39J9vvg==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ], + "optional": true, + "peer": true + }, + "node_modules/async-retry": { + "version": "1.3.3", + "resolved": "https://registry.npmmirror.com/async-retry/-/async-retry-1.3.3.tgz", + "integrity": "sha512-wfr/jstw9xNi/0teMHrRW7dsz3Lt5ARhYNZ2ewpadnhaIp5mbALhOAP+EAdsC7t4Z6wqsDVv9+W6gm1Dk9mEyw==", + "dependencies": { + "retry": "0.13.1" + } + }, + "node_modules/async-validator": { + "version": "4.2.5", + "resolved": "https://registry.npmmirror.com/async-validator/-/async-validator-4.2.5.tgz", + "integrity": "sha512-7HhHjtERjqlNbZtqNqy2rckN/SpOOlmDliet+lP7k+eKZEjPk3DgyeU9lIXLdeLz0uBbbVp+9Qdow9wJWgwwfg==" + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmmirror.com/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" + }, + "node_modules/at-least-node": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/at-least-node/-/at-least-node-1.0.0.tgz", + "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/atob": { + "version": "2.1.2", + "resolved": "https://registry.npmmirror.com/atob/-/atob-2.1.2.tgz", + "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==", + "bin": { + "atob": "bin/atob.js" + }, + "engines": { + "node": ">= 4.5.0" + } + }, + "node_modules/autoprefixer": { + "version": "10.4.19", + "resolved": "https://registry.npmmirror.com/autoprefixer/-/autoprefixer-10.4.19.tgz", + "integrity": "sha512-BaENR2+zBZ8xXhM4pUaKUxlVdxZ0EZhjvbopwnXmxRUfqDmwSpC2lAi/QXvx7NRdPCo1WKEcEF6mV64si1z4Ew==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "browserslist": "^4.23.0", + "caniuse-lite": "^1.0.30001599", + "fraction.js": "^4.3.7", + "normalize-range": "^0.1.2", + "picocolors": "^1.0.0", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + 
"postcss": "^8.1.0" + } + }, + "node_modules/available-typed-arrays": { + "version": "1.0.7", + "resolved": "https://registry.npmmirror.com/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", + "integrity": "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==", + "dependencies": { + "possible-typed-array-names": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/axios": { + "version": "1.7.0", + "resolved": "https://registry.npmmirror.com/axios/-/axios-1.7.0.tgz", + "integrity": "sha512-IiB0wQeKyPRdsFVhBgIo31FbzOyf2M6wYl7/NVutFwFBRMiAbjNiydJIHKeLmPugF4kJLfA1uWZ82Is2QzqqFA==", + "dependencies": { + "follow-redirects": "^1.15.6", + "form-data": "^4.0.0", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/axios-extensions": { + "version": "3.1.6", + "resolved": "https://registry.npmmirror.com/axios-extensions/-/axios-extensions-3.1.6.tgz", + "integrity": "sha512-CmwMYxxAw4DcQDJ7/2Iv4GJj1Ao48lJEPieycgZQH6m1KcYZqf9zm2HM/CsULqheCpYxZbiGrCfZf5tVjXqoLg==", + "dependencies": { + "lru-cache": "^7.14.0", + "tslib": "^2.1.0", + "util": "^0.12.3" + }, + "peerDependencies": { + "axios": "*" + } + }, + "node_modules/axios-extensions/node_modules/lru-cache": { + "version": "7.18.3", + "resolved": "https://registry.npmmirror.com/lru-cache/-/lru-cache-7.18.3.tgz", + "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", + "engines": { + "node": ">=12" + } + }, + "node_modules/axios/node_modules/form-data": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/babel-core": { + "version": "7.0.0-bridge.0", + "resolved": "https://registry.npmmirror.com/babel-core/-/babel-core-7.0.0-bridge.0.tgz", + "integrity": "sha512-poPX9mZH/5CSanm50Q+1toVci6pv5KSRv/5TWCwtzQS5XEwn40BcCrgIeMFWP9CKKIniKXNxoIOnOq4VVlGXhg==", + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/babel-jest": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/babel-jest/-/babel-jest-27.5.1.tgz", + "integrity": "sha512-cdQ5dXjGRd0IBRATiQ4mZGlGlRE8kJpjPOixdNRdT+m3UcNqmYWN6rK6nvtXYfY3D76cb8s/O1Ss8ea24PIwcg==", + "dev": true, + "dependencies": { + "@jest/transform": "^27.5.1", + "@jest/types": "^27.5.1", + "@types/babel__core": "^7.1.14", + "babel-plugin-istanbul": "^6.1.1", + "babel-preset-jest": "^27.5.1", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "slash": "^3.0.0" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.8.0" + } + }, + "node_modules/babel-jest/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/babel-jest/node_modules/chalk": { + "version": "4.1.2", + "resolved": 
"https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/babel-jest/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/babel-jest/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/babel-jest/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-jest/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-loader": { + "version": "8.3.0", + "resolved": "https://registry.npmmirror.com/babel-loader/-/babel-loader-8.3.0.tgz", + "integrity": "sha512-H8SvsMF+m9t15HNLMipppzkC+Y2Yq+v3SonZyU70RBL/h1gxPkH08Ot8pEE9Z4Kd+czyWJClmFS8qzIP9OZ04Q==", + "dev": true, + "dependencies": { + "find-cache-dir": "^3.3.1", + "loader-utils": "^2.0.0", + "make-dir": "^3.1.0", + "schema-utils": "^2.6.5" + }, + "engines": { + "node": ">= 8.9" + }, + "peerDependencies": { + "@babel/core": "^7.0.0", + "webpack": ">=2" + } + }, + "node_modules/babel-loader/node_modules/loader-utils": { + "version": "2.0.4", + "resolved": "https://registry.npmmirror.com/loader-utils/-/loader-utils-2.0.4.tgz", + "integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==", + "dev": true, + "dependencies": { + "big.js": "^5.2.2", + "emojis-list": "^3.0.0", + "json5": "^2.1.2" + }, + "engines": { + "node": ">=8.9.0" + } + }, + "node_modules/babel-plugin-istanbul": { + "version": "6.1.1", + "resolved": "https://registry.npmmirror.com/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz", + "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-instrument": "^5.0.4", + "test-exclude": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-jest-hoist": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-27.5.1.tgz", + "integrity": 
"sha512-50wCwD5EMNW4aRpOwtqzyZHIewTYNxLA4nhB+09d8BIssfNfzBRhkBIHiaPv1Si226TQSvp8gxAJm2iY2qs2hQ==", + "dev": true, + "dependencies": { + "@babel/template": "^7.3.3", + "@babel/types": "^7.3.3", + "@types/babel__core": "^7.0.0", + "@types/babel__traverse": "^7.0.6" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs2": { + "version": "0.4.11", + "resolved": "https://registry.npmmirror.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.11.tgz", + "integrity": "sha512-sMEJ27L0gRHShOh5G54uAAPaiCOygY/5ratXuiyb2G46FmlSpc9eFCzYVyDiPxfNbwzA7mYahmjQc5q+CZQ09Q==", + "dependencies": { + "@babel/compat-data": "^7.22.6", + "@babel/helper-define-polyfill-provider": "^0.6.2", + "semver": "^6.3.1" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs3": { + "version": "0.10.4", + "resolved": "https://registry.npmmirror.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.10.4.tgz", + "integrity": "sha512-25J6I8NGfa5YkCDogHRID3fVCadIR8/pGl1/spvCkzb6lVn6SR3ojpx9nOn9iEBcUsjY24AmdKm5khcfKdylcg==", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.6.1", + "core-js-compat": "^3.36.1" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-regenerator": { + "version": "0.6.2", + "resolved": "https://registry.npmmirror.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.2.tgz", + "integrity": "sha512-2R25rQZWP63nGwaAswvDazbPXfrM3HwVoBXK6HcqeKrSrL/JqcC/rDcf95l4r7LXLyxDXc8uQDa064GubtCABg==", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.6.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-syntax-dynamic-import": { + "version": "6.18.0", + "resolved": "https://registry.npmmirror.com/babel-plugin-syntax-dynamic-import/-/babel-plugin-syntax-dynamic-import-6.18.0.tgz", + "integrity": "sha512-MioUE+LfjCEz65Wf7Z/Rm4XCP5k2c+TbMd2Z2JKc7U9uwjBhAfNPE48KC4GTGKhppMeYVepwDBNO/nGY6NYHBA==" + }, + "node_modules/babel-preset-current-node-syntax": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.0.1.tgz", + "integrity": "sha512-M7LQ0bxarkxQoN+vz5aJPsLBn77n8QgTFmo8WK0/44auK2xlCXrYcUxHFxgU7qW5Yzw/CjmLRK2uJzaCd7LvqQ==", + "dev": true, + "dependencies": { + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-bigint": "^7.8.3", + "@babel/plugin-syntax-class-properties": "^7.8.3", + "@babel/plugin-syntax-import-meta": "^7.8.3", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.8.3", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.8.3", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-top-level-await": "^7.8.3" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/babel-preset-jest": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/babel-preset-jest/-/babel-preset-jest-27.5.1.tgz", + "integrity": "sha512-Nptf2FzlPCWYuJg41HBqXVT8ym6bXOevuCTbhxlUpjwtysGaIWFvDEjp4y+G7fl13FgOdjs7P/DmErqH7da0Ag==", + "dev": true, + "dependencies": { + 
"babel-plugin-jest-hoist": "^27.5.1", + "babel-preset-current-node-syntax": "^1.0.0" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/backo2": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/backo2/-/backo2-1.0.2.tgz", + "integrity": "sha512-zj6Z6M7Eq+PBZ7PQxl5NT665MvJdAkzp0f60nAJ+sLaSCBPMwVak5ZegFbgVCzFcCJTKFoMizvM5Ld7+JrRJHA==" + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + }, + "node_modules/base": { + "version": "0.11.2", + "resolved": "https://registry.npmmirror.com/base/-/base-0.11.2.tgz", + "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==", + "dependencies": { + "cache-base": "^1.0.1", + "class-utils": "^0.3.5", + "component-emitter": "^1.2.1", + "define-property": "^1.0.0", + "isobject": "^3.0.1", + "mixin-deep": "^1.2.0", + "pascalcase": "^0.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/base/node_modules/define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/define-property/-/define-property-1.0.0.tgz", + "integrity": "sha512-cZTYKFWspt9jZsMscWo8sc/5lbPC9Q0N5nBLgb+Yd915iL3udB1uFgS3B8YCx66UVHq018DAVFoee7x+gxggeA==", + "dependencies": { + "is-descriptor": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmmirror.com/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/batch": { + "version": "0.6.1", + "resolved": "https://registry.npmmirror.com/batch/-/batch-0.6.1.tgz", + "integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==", + "dev": true + }, + "node_modules/better-scroll": { + "version": "2.5.1", + "resolved": "https://registry.npmmirror.com/better-scroll/-/better-scroll-2.5.1.tgz", + "integrity": "sha512-OiF3cQroRfTzf+CRQH2z1G52ZAlNHINI6lCAvDmyFu0o0nRuTaV9F+fmBGIU2BL5p5IplUQ4E7sYa1TLfZarzQ==", + "dependencies": { + "@better-scroll/core": "^2.5.1", + "@better-scroll/indicators": "^2.5.1", + "@better-scroll/infinity": "^2.5.1", + "@better-scroll/mouse-wheel": "^2.5.1", + "@better-scroll/movable": "^2.5.1", + "@better-scroll/nested-scroll": "^2.5.1", + "@better-scroll/observe-dom": "^2.5.1", + "@better-scroll/observe-image": "^2.5.1", + "@better-scroll/pull-down": "^2.5.1", + "@better-scroll/pull-up": "^2.5.1", + "@better-scroll/scroll-bar": "^2.5.1", + "@better-scroll/slide": "^2.5.1", + "@better-scroll/wheel": "^2.5.1", + "@better-scroll/zoom": "^2.5.1" + } + }, + "node_modules/big.js": { + "version": "5.2.2", + "resolved": "https://registry.npmmirror.com/big.js/-/big.js-5.2.2.tgz", + "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==", + "engines": { + "node": "*" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": 
"https://registry.npmmirror.com/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "devOptional": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/bindings": { + "version": "1.5.0", + "resolved": "https://registry.npmmirror.com/bindings/-/bindings-1.5.0.tgz", + "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", + "optional": true, + "peer": true, + "dependencies": { + "file-uri-to-path": "1.0.0" + } + }, + "node_modules/bl": { + "version": "4.1.0", + "resolved": "https://registry.npmmirror.com/bl/-/bl-4.1.0.tgz", + "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", + "dependencies": { + "buffer": "^5.5.0", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } + }, + "node_modules/bl/node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmmirror.com/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/bluebird": { + "version": "3.7.2", + "resolved": "https://registry.npmmirror.com/bluebird/-/bluebird-3.7.2.tgz", + "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==" + }, + "node_modules/bn.js": { + "version": "5.2.1", + "resolved": "https://registry.npmmirror.com/bn.js/-/bn.js-5.2.1.tgz", + "integrity": "sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ==", + "peer": true + }, + "node_modules/body-parser": { + "version": "1.20.2", + "resolved": "https://registry.npmmirror.com/body-parser/-/body-parser-1.20.2.tgz", + "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", + "dependencies": { + "bytes": "3.1.2", + "content-type": "~1.0.5", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "on-finished": "2.4.1", + "qs": "6.11.0", + "raw-body": "2.5.2", + "type-is": "~1.6.18", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/body-parser/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmmirror.com/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/body-parser/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/bonjour-service": { + "version": "1.2.1", + "resolved": "https://registry.npmmirror.com/bonjour-service/-/bonjour-service-1.2.1.tgz", + "integrity": "sha512-oSzCS2zV14bh2kji6vNe7vrpJYCHGvcZnlffFQ1MEoX/WOeQ/teD8SYWKR942OI3INjq8OMNJlbPK5LLLUxFDw==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.3", + "multicast-dns": "^7.2.5" + } + }, + "node_modules/boolbase": { + "version": "1.0.0", + "resolved": 
"https://registry.npmmirror.com/boolbase/-/boolbase-1.0.0.tgz", + "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==", + "dev": true + }, + "node_modules/boxen": { + "version": "5.1.2", + "resolved": "https://registry.npmmirror.com/boxen/-/boxen-5.1.2.tgz", + "integrity": "sha512-9gYgQKXx+1nP8mP7CzFyaUARhg7D3n1dF/FnErWmu9l6JvGpNUN278h0aSb+QjoiKSWG+iZ3uHrcqk0qrY9RQQ==", + "dependencies": { + "ansi-align": "^3.0.0", + "camelcase": "^6.2.0", + "chalk": "^4.1.0", + "cli-boxes": "^2.2.1", + "string-width": "^4.2.2", + "type-fest": "^0.20.2", + "widest-line": "^3.1.0", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/boxen/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/boxen/node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmmirror.com/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/boxen/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/boxen/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/boxen/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/boxen/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/boxen/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmmirror.com/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": 
"sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/brorand": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/brorand/-/brorand-1.1.0.tgz", + "integrity": "sha512-cKV8tMCEpQs4hK/ik71d6LrPOnpkpGBR0wzxqr68g2m/LB2GxVYQroAjMJZRVM1Y4BCjCKc3vAamxSzOY2RP+w==", + "peer": true + }, + "node_modules/browser-process-hrtime": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/browser-process-hrtime/-/browser-process-hrtime-1.0.0.tgz", + "integrity": "sha512-9o5UecI3GhkpM6DrXr69PblIuWxPKk9Y0jHBRhdocZ2y7YECBFCsHm79Pr3OyR2AvjhDkabFJaDJMYRazHgsow==", + "dev": true + }, + "node_modules/browserify-aes": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/browserify-aes/-/browserify-aes-1.2.0.tgz", + "integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==", + "peer": true, + "dependencies": { + "buffer-xor": "^1.0.3", + "cipher-base": "^1.0.0", + "create-hash": "^1.1.0", + "evp_bytestokey": "^1.0.3", + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/browserify-cipher": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/browserify-cipher/-/browserify-cipher-1.0.1.tgz", + "integrity": "sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==", + "peer": true, + "dependencies": { + "browserify-aes": "^1.0.4", + "browserify-des": "^1.0.0", + "evp_bytestokey": "^1.0.0" + } + }, + "node_modules/browserify-des": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/browserify-des/-/browserify-des-1.0.2.tgz", + "integrity": "sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A==", + "peer": true, + "dependencies": { + "cipher-base": "^1.0.1", + "des.js": "^1.0.0", + "inherits": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + "node_modules/browserify-rsa": { + "version": "4.1.0", + "resolved": "https://registry.npmmirror.com/browserify-rsa/-/browserify-rsa-4.1.0.tgz", + "integrity": "sha512-AdEER0Hkspgno2aR97SAf6vi0y0k8NuOpGnVH3O99rcA5Q6sh8QxcngtHuJ6uXwnfAXNM4Gn1Gb7/MV1+Ymbog==", + "peer": true, + "dependencies": { + "bn.js": "^5.0.0", + "randombytes": "^2.0.1" + } + }, + "node_modules/browserify-sign": { + "version": "4.2.3", + "resolved": "https://registry.npmmirror.com/browserify-sign/-/browserify-sign-4.2.3.tgz", + "integrity": "sha512-JWCZW6SKhfhjJxO8Tyiiy+XYB7cqd2S5/+WeYHsKdNKFlCBhKbblba1A/HN/90YwtxKc8tCErjffZl++UNmGiw==", + "peer": true, + "dependencies": { + "bn.js": "^5.2.1", + "browserify-rsa": "^4.1.0", + "create-hash": "^1.2.0", + "create-hmac": "^1.1.7", + "elliptic": "^6.5.5", + "hash-base": "~3.0", + "inherits": "^2.0.4", + "parse-asn1": "^5.1.7", + "readable-stream": "^2.3.8", + "safe-buffer": "^5.2.1" + }, + "engines": { + "node": ">= 0.12" + } + }, + "node_modules/browserify-zlib": { + "version": "0.2.0", + "resolved": "https://registry.npmmirror.com/browserify-zlib/-/browserify-zlib-0.2.0.tgz", + "integrity": 
"sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==", + "peer": true, + "dependencies": { + "pako": "~1.0.5" + } + }, + "node_modules/browserslist": { + "version": "4.23.0", + "resolved": "https://registry.npmmirror.com/browserslist/-/browserslist-4.23.0.tgz", + "integrity": "sha512-QW8HiM1shhT2GuzkvklfjcKDiWFXHOeFCIA/huJPwHsslwcydgk7X+z2zXpEijP98UCY7HbubZt5J2Zgvf0CaQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "caniuse-lite": "^1.0.30001587", + "electron-to-chromium": "^1.4.668", + "node-releases": "^2.0.14", + "update-browserslist-db": "^1.0.13" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/bs-logger": { + "version": "0.2.6", + "resolved": "https://registry.npmmirror.com/bs-logger/-/bs-logger-0.2.6.tgz", + "integrity": "sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==", + "dev": true, + "dependencies": { + "fast-json-stable-stringify": "2.x" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/bser": { + "version": "2.1.1", + "resolved": "https://registry.npmmirror.com/bser/-/bser-2.1.1.tgz", + "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==", + "dev": true, + "dependencies": { + "node-int64": "^0.4.0" + } + }, + "node_modules/buffer": { + "version": "5.7.1", + "resolved": "https://registry.npmmirror.com/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + }, + "node_modules/buffer-alloc": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/buffer-alloc/-/buffer-alloc-1.2.0.tgz", + "integrity": "sha512-CFsHQgjtW1UChdXgbyJGtnm+O/uLQeZdtbDo8mfUgYXCHSM1wgrVxXm6bSyrUuErEb+4sYVGCzASBRot7zyrow==", + "dependencies": { + "buffer-alloc-unsafe": "^1.1.0", + "buffer-fill": "^1.0.0" + } + }, + "node_modules/buffer-alloc-unsafe": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz", + "integrity": "sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg==" + }, + "node_modules/buffer-crc32": { + "version": "0.2.13", + "resolved": "https://registry.npmmirror.com/buffer-crc32/-/buffer-crc32-0.2.13.tgz", + "integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==", + "engines": { + "node": "*" + } + }, + "node_modules/buffer-fill": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/buffer-fill/-/buffer-fill-1.0.0.tgz", + "integrity": "sha512-T7zexNBwiiaCOGDg9xNX9PBmjrubblRkENuptryuI64URkXDFum9il/JGL8Lm8wYfAXpredVXXZz7eMHilimiQ==" + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmmirror.com/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": 
"sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==" + }, + "node_modules/buffer-xor": { + "version": "1.0.3", + "resolved": "https://registry.npmmirror.com/buffer-xor/-/buffer-xor-1.0.3.tgz", + "integrity": "sha512-571s0T7nZWK6vB67HI5dyUF7wXiNcfaPPPTl6zYCNApANjIvYJTg7hlud/+cJpdAhS7dVzqMLmfhfHR3rAcOjQ==", + "peer": true + }, + "node_modules/bufferutil": { + "version": "4.0.8", + "resolved": "https://registry.npmmirror.com/bufferutil/-/bufferutil-4.0.8.tgz", + "integrity": "sha512-4T53u4PdgsXqKaIctwF8ifXlRTTmEPJ8iEPWFdGZvcf7sbwYo6FKFEX9eNNAnzFZ7EzJAQ3CJeOtCRA4rDp7Pw==", + "hasInstallScript": true, + "dependencies": { + "node-gyp-build": "^4.3.0" + }, + "engines": { + "node": ">=6.14.2" + } + }, + "node_modules/builtin-status-codes": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz", + "integrity": "sha512-HpGFw18DgFWlncDfjTa2rcQ4W88O1mC8e8yZ2AvQY5KDaktSTwo+KRf6nHK6FRI5FyRyb/5T6+TSxfP7QyGsmQ==", + "peer": true + }, + "node_modules/builtins": { + "version": "1.0.3", + "resolved": "https://registry.npmmirror.com/builtins/-/builtins-1.0.3.tgz", + "integrity": "sha512-uYBjakWipfaO/bXI7E8rq6kpwHRZK5cNYrUv2OzZSI/FvmdMyXJ2tG9dKcjEC5YHmHpUAwsargWIZNWdxb/bnQ==" + }, + "node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmmirror.com/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/cacache": { + "version": "12.0.4", + "resolved": "https://registry.npmmirror.com/cacache/-/cacache-12.0.4.tgz", + "integrity": "sha512-a0tMB40oefvuInr4Cwb3GerbL9xTj1D5yg0T5xrjGCGyfvbxseIXX7BAO/u/hIXdafzOI5JC3wDwHyf24buOAQ==", + "peer": true, + "dependencies": { + "bluebird": "^3.5.5", + "chownr": "^1.1.1", + "figgy-pudding": "^3.5.1", + "glob": "^7.1.4", + "graceful-fs": "^4.1.15", + "infer-owner": "^1.0.3", + "lru-cache": "^5.1.1", + "mississippi": "^3.0.0", + "mkdirp": "^0.5.1", + "move-concurrently": "^1.0.1", + "promise-inflight": "^1.0.1", + "rimraf": "^2.6.3", + "ssri": "^6.0.1", + "unique-filename": "^1.1.1", + "y18n": "^4.0.0" + } + }, + "node_modules/cacache/node_modules/rimraf": { + "version": "2.7.1", + "resolved": "https://registry.npmmirror.com/rimraf/-/rimraf-2.7.1.tgz", + "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", + "peer": true, + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + } + }, + "node_modules/cacache/node_modules/ssri": { + "version": "6.0.2", + "resolved": "https://registry.npmmirror.com/ssri/-/ssri-6.0.2.tgz", + "integrity": "sha512-cepbSq/neFK7xB6A50KHN0xHDotYzq58wWCa5LeWqnPrHG8GzfEjO/4O8kpmcGW+oaxkvhEJCWgbgNk4/ZV93Q==", + "peer": true, + "dependencies": { + "figgy-pudding": "^3.5.1" + } + }, + "node_modules/cacache/node_modules/y18n": { + "version": "4.0.3", + "resolved": "https://registry.npmmirror.com/y18n/-/y18n-4.0.3.tgz", + "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==", + "peer": true + }, + "node_modules/cache-base": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/cache-base/-/cache-base-1.0.1.tgz", + "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==", + "dependencies": { + "collection-visit": "^1.0.0", + "component-emitter": "^1.2.1", + "get-value": "^2.0.6", 
+ "has-value": "^1.0.0", + "isobject": "^3.0.1", + "set-value": "^2.0.0", + "to-object-path": "^0.3.0", + "union-value": "^1.0.0", + "unset-value": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/cacheable-request": { + "version": "2.1.4", + "resolved": "https://registry.npmmirror.com/cacheable-request/-/cacheable-request-2.1.4.tgz", + "integrity": "sha512-vag0O2LKZ/najSoUwDbVlnlCFvhBE/7mGTY2B5FgCBDcRD+oVV1HYTOwM6JZfMg/hIcM6IwnTZ1uQQL5/X3xIQ==", + "dependencies": { + "clone-response": "1.0.2", + "get-stream": "3.0.0", + "http-cache-semantics": "3.8.1", + "keyv": "3.0.0", + "lowercase-keys": "1.0.0", + "normalize-url": "2.0.1", + "responselike": "1.0.2" + } + }, + "node_modules/cacheable-request/node_modules/json-buffer": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/json-buffer/-/json-buffer-3.0.0.tgz", + "integrity": "sha512-CuUqjv0FUZIdXkHPI8MezCnFCdaTAacej1TZYulLoAg1h/PhwkdXFN4V/gzY4g+fMBCOV2xF+rp7t2XD2ns/NQ==" + }, + "node_modules/cacheable-request/node_modules/keyv": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/keyv/-/keyv-3.0.0.tgz", + "integrity": "sha512-eguHnq22OE3uVoSYG0LVWNP+4ppamWr9+zWBe1bsNcovIMy6huUJFPgy4mGwCd/rnl3vOLGW1MTlu4c57CT1xA==", + "dependencies": { + "json-buffer": "3.0.0" + } + }, + "node_modules/cacheable-request/node_modules/lowercase-keys": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/lowercase-keys/-/lowercase-keys-1.0.0.tgz", + "integrity": "sha512-RPlX0+PHuvxVDZ7xX+EBVAp4RsVxP/TdDSN2mJYdiq1Lc4Hz7EUSjUI7RZrKKlmrIzVhf6Jo2stj7++gVarS0A==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/call-bind": { + "version": "1.0.7", + "resolved": "https://registry.npmmirror.com/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/camel-case": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/camel-case/-/camel-case-4.1.2.tgz", + "integrity": "sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw==", + "dev": true, + "dependencies": { + "pascal-case": "^3.1.2", + "tslib": "^2.0.3" + } + }, + "node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmmirror.com/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/caniuse-api": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/caniuse-api/-/caniuse-api-3.0.0.tgz", + "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", + "dev": true, + "dependencies": { + "browserslist": "^4.0.0", + "caniuse-lite": "^1.0.0", + "lodash.memoize": "^4.1.2", + "lodash.uniq": "^4.5.0" + } + }, + "node_modules/caniuse-lite": { + 
"version": "1.0.30001620", + "resolved": "https://registry.npmmirror.com/caniuse-lite/-/caniuse-lite-1.0.30001620.tgz", + "integrity": "sha512-WJvYsOjd1/BYUY6SNGUosK9DUidBPDTnOARHp3fSmFO1ekdxaY6nKRttEVrfMmYi80ctS0kz1wiWmm14fVc3ew==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ] + }, + "node_modules/canvas": { + "version": "2.11.2", + "resolved": "https://registry.npmmirror.com/canvas/-/canvas-2.11.2.tgz", + "integrity": "sha512-ItanGBMrmRV7Py2Z+Xhs7cT+FNt5K0vPL4p9EZ/UX/Mu7hFbkxSjKF2KVtPwX7UYWp7dRKnrTvReflgrItJbdw==", + "hasInstallScript": true, + "optional": true, + "dependencies": { + "@mapbox/node-pre-gyp": "^1.0.0", + "nan": "^2.17.0", + "simple-get": "^3.0.3" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/case-sensitive-paths-webpack-plugin": { + "version": "2.4.0", + "resolved": "https://registry.npmmirror.com/case-sensitive-paths-webpack-plugin/-/case-sensitive-paths-webpack-plugin-2.4.0.tgz", + "integrity": "sha512-roIFONhcxog0JSSWbvVAh3OocukmSgpqOH6YpMkCvav/ySIV3JKg4Dc8vYtQjYi/UxpNE36r/9v+VqTQqgkYmw==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/caw": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/caw/-/caw-2.0.1.tgz", + "integrity": "sha512-Cg8/ZSBEa8ZVY9HspcGUYaK63d/bN7rqS3CYCzEGUxuYv6UlmcjzDUz2fCFFHyTvUW5Pk0I+3hkA3iXlIj6guA==", + "dependencies": { + "get-proxy": "^2.0.0", + "isurl": "^1.0.0-alpha5", + "tunnel-agent": "^0.6.0", + "url-to-options": "^1.0.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/chardet": { + "version": "0.7.0", + "resolved": "https://registry.npmmirror.com/chardet/-/chardet-0.7.0.tgz", + "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==" + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmmirror.com/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "devOptional": true, + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmmirror.com/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": 
"sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "devOptional": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/chownr": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/chownr/-/chownr-1.1.4.tgz", + "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==", + "peer": true + }, + "node_modules/chrome-trace-event": { + "version": "1.0.3", + "resolved": "https://registry.npmmirror.com/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz", + "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==", + "engines": { + "node": ">=6.0" + } + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmmirror.com/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "engines": { + "node": ">=8" + } + }, + "node_modules/cipher-base": { + "version": "1.0.4", + "resolved": "https://registry.npmmirror.com/cipher-base/-/cipher-base-1.0.4.tgz", + "integrity": "sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==", + "peer": true, + "dependencies": { + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/cjs-module-lexer": { + "version": "1.3.1", + "resolved": "https://registry.npmmirror.com/cjs-module-lexer/-/cjs-module-lexer-1.3.1.tgz", + "integrity": "sha512-a3KdPAANPbNE4ZUv9h6LckSl9zLsYOP4MBmhIPkRaeyybt+r4UghLvq+xw/YwUcC1gqylCkL4rdVs3Lwupjm4Q==", + "dev": true + }, + "node_modules/class-utils": { + "version": "0.3.6", + "resolved": "https://registry.npmmirror.com/class-utils/-/class-utils-0.3.6.tgz", + "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==", + "dependencies": { + "arr-union": "^3.1.0", + "define-property": "^0.2.5", + "isobject": "^3.0.0", + "static-extend": "^0.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/class-utils/node_modules/define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmmirror.com/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha512-Rr7ADjQZenceVOAKop6ALkkRAmH1A4Gx9hV/7ZujPUN2rkATqFO0JZLZInbAjpZYoJ1gUx8MRMQVkYemcbMSTA==", + "dependencies": { + "is-descriptor": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/class-utils/node_modules/is-descriptor": { + "version": "0.1.7", + "resolved": "https://registry.npmmirror.com/is-descriptor/-/is-descriptor-0.1.7.tgz", + "integrity": "sha512-C3grZTvObeN1xud4cRWl366OMXZTj0+HGyk4hvfpx4ZHt1Pb60ANSXqCK7pdOTeUQpRzECBSTphqvD7U+l22Eg==", + "dependencies": { + "is-accessor-descriptor": "^1.0.1", + "is-data-descriptor": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/clean-css": { + "version": "5.3.3", + "resolved": "https://registry.npmmirror.com/clean-css/-/clean-css-5.3.3.tgz", + "integrity": "sha512-D5J+kHaVb/wKSFcyyV75uCn8fiY4sV38XJoe4CUyGQ+mOU/fMVYUdH1hJC+CJQ5uY3EnW27SbJYS4X8BiLrAFg==", + "dev": true, + "dependencies": { + "source-map": "~0.6.0" + }, + "engines": { + "node": ">= 10.0" + } + }, + "node_modules/clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmmirror.com/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": 
"sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "engines": { + "node": ">=6" + } + }, + "node_modules/cli-boxes": { + "version": "2.2.1", + "resolved": "https://registry.npmmirror.com/cli-boxes/-/cli-boxes-2.2.1.tgz", + "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==", + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/cli-cursor/-/cli-cursor-3.1.0.tgz", + "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", + "dependencies": { + "restore-cursor": "^3.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cli-highlight": { + "version": "2.1.11", + "resolved": "https://registry.npmmirror.com/cli-highlight/-/cli-highlight-2.1.11.tgz", + "integrity": "sha512-9KDcoEVwyUXrjcJNvHD0NFc/hiwe/WPVYIleQh2O1N2Zro5gWJZ/K+3DGn8w8P/F6FxOgzyC5bxDyHIgCSPhGg==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0", + "highlight.js": "^10.7.1", + "mz": "^2.4.0", + "parse5": "^5.1.1", + "parse5-htmlparser2-tree-adapter": "^6.0.0", + "yargs": "^16.0.0" + }, + "bin": { + "highlight": "bin/highlight" + }, + "engines": { + "node": ">=8.0.0", + "npm": ">=5.0.0" + } + }, + "node_modules/cli-highlight/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/cli-highlight/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/cli-highlight/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/cli-highlight/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/cli-highlight/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/cli-highlight/node_modules/highlight.js": { + "version": "10.7.3", + "resolved": "https://registry.npmmirror.com/highlight.js/-/highlight.js-10.7.3.tgz", + "integrity": 
"sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A==", + "dev": true, + "engines": { + "node": "*" + } + }, + "node_modules/cli-highlight/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cli-spinners": { + "version": "2.9.2", + "resolved": "https://registry.npmmirror.com/cli-spinners/-/cli-spinners-2.9.2.tgz", + "integrity": "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==", + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-width": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/cli-width/-/cli-width-3.0.0.tgz", + "integrity": "sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/clipboardy": { + "version": "2.3.0", + "resolved": "https://registry.npmmirror.com/clipboardy/-/clipboardy-2.3.0.tgz", + "integrity": "sha512-mKhiIL2DrQIsuXMgBgnfEHOZOryC7kY7YO//TN6c63wlEm3NG5tz+YgY5rVi29KCmq/QQjKYvM7a19+MDOTHOQ==", + "dev": true, + "dependencies": { + "arch": "^2.1.1", + "execa": "^1.0.0", + "is-wsl": "^2.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmmirror.com/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, + "node_modules/clone": { + "version": "2.1.2", + "resolved": "https://registry.npmmirror.com/clone/-/clone-2.1.2.tgz", + "integrity": "sha512-3Pe/CF1Nn94hyhIYpjtiLhdCoEoz0DqQ+988E9gmeEdQZlojxnOb74wctFyuwWQHzqyf9X7C7MG8juUpqBJT8w==", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/clone-deep": { + "version": "4.0.1", + "resolved": "https://registry.npmmirror.com/clone-deep/-/clone-deep-4.0.1.tgz", + "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", + "dependencies": { + "is-plain-object": "^2.0.4", + "kind-of": "^6.0.2", + "shallow-clone": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/clone-deep/node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmmirror.com/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/clone-response": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/clone-response/-/clone-response-1.0.2.tgz", + "integrity": "sha512-yjLXh88P599UOyPTFX0POsd7WxnbsVsGohcwzHOLspIhhpalPw1BcqED8NblyZLKcGrL8dTgMlcaZxV2jAD41Q==", + "dependencies": { + "mimic-response": "^1.0.0" + } + }, + "node_modules/co": { + "version": "4.6.0", + "resolved": "https://registry.npmmirror.com/co/-/co-4.6.0.tgz", + "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", + "dev": true, + "engines": { + "iojs": ">= 1.0.0", + "node": ">= 0.12.0" + } + }, + 
"node_modules/collect-v8-coverage": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz", + "integrity": "sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==", + "dev": true + }, + "node_modules/collection-visit": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/collection-visit/-/collection-visit-1.0.0.tgz", + "integrity": "sha512-lNkKvzEeMBBjUGHZ+q6z9pSJla0KWAQPvtzhEV9+iGyQYG+pBpl7xKDhxoNSOZH2hhv0v5k0y2yAM4o4SjoSkw==", + "dependencies": { + "map-visit": "^1.0.0", + "object-visit": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + }, + "node_modules/color-support": { + "version": "1.1.3", + "resolved": "https://registry.npmmirror.com/color-support/-/color-support-1.1.3.tgz", + "integrity": "sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==", + "optional": true, + "bin": { + "color-support": "bin.js" + } + }, + "node_modules/colord": { + "version": "2.9.3", + "resolved": "https://registry.npmmirror.com/colord/-/colord-2.9.3.tgz", + "integrity": "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw==", + "dev": true + }, + "node_modules/colorette": { + "version": "2.0.20", + "resolved": "https://registry.npmmirror.com/colorette/-/colorette-2.0.20.tgz", + "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==" + }, + "node_modules/colors": { + "version": "1.4.0", + "resolved": "https://registry.npmmirror.com/colors/-/colors-1.4.0.tgz", + "integrity": "sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA==", + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmmirror.com/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/commondir": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/commondir/-/commondir-1.0.1.tgz", + "integrity": "sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==" + }, + "node_modules/component-emitter": { + "version": "1.3.1", + "resolved": "https://registry.npmmirror.com/component-emitter/-/component-emitter-1.3.1.tgz", + "integrity": "sha512-T0+barUSQRTUQASh8bx02dl+DhF54GtIDY13Y3m9oWTklKbb3Wv974meRpeZ3lp1JpLVECWWNHC4vaG2XHXouQ==", + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/compressible": { + "version": "2.0.18", + "resolved": "https://registry.npmmirror.com/compressible/-/compressible-2.0.18.tgz", + "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", + "dev": true, + "dependencies": { + "mime-db": ">= 1.43.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compression": { + "version": "1.7.4", + "resolved": "https://registry.npmmirror.com/compression/-/compression-1.7.4.tgz", + "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==", + "dev": true, + "dependencies": { + "accepts": "~1.3.5", + "bytes": "3.0.0", + "compressible": "~2.0.16", + "debug": "2.6.9", + "on-headers": "~1.0.2", + "safe-buffer": "5.1.2", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/compression/node_modules/bytes": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/bytes/-/bytes-3.0.0.tgz", + "integrity": "sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==", + "dev": true, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/compression/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmmirror.com/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dev": true, + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/compression/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "dev": true + }, + "node_modules/compression/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmmirror.com/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + }, + "node_modules/compute-scroll-into-view": { + "version": "1.0.20", + "resolved": "https://registry.npmmirror.com/compute-scroll-into-view/-/compute-scroll-into-view-1.0.20.tgz", + "integrity": "sha512-UCB0ioiyj8CRjtrvaceBLqqhZCVP+1B8+NWQhmdsm0VXOJtobBCf1dBQmebCCo34qZmUwZfIH2MZLqNHazrfjg==" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmmirror.com/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" + }, + "node_modules/concat-stream": { + "version": "1.6.2", + "resolved": "https://registry.npmmirror.com/concat-stream/-/concat-stream-1.6.2.tgz", + "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", + "engines": [ + "node >= 0.8" + ], + "peer": true, + "dependencies": { + "buffer-from": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^2.2.2", + "typedarray": "^0.0.6" + } + }, + "node_modules/condense-newlines": { + "version": "0.2.1", + "resolved": "https://registry.npmmirror.com/condense-newlines/-/condense-newlines-0.2.1.tgz", + "integrity": "sha512-P7X+QL9Hb9B/c8HI5BFFKmjgBu2XpQuF98WZ9XkO+dBGgk5XgwiQz7o1SmpglNWId3581UcS0SFAWfoIhMHPfg==", + "dev": true, + "dependencies": { + "extend-shallow": "^2.0.1", + "is-whitespace": "^0.3.0", + "kind-of": "^3.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, 
+ "node_modules/config-chain": { + "version": "1.1.13", + "resolved": "https://registry.npmmirror.com/config-chain/-/config-chain-1.1.13.tgz", + "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", + "dependencies": { + "ini": "^1.3.4", + "proto-list": "~1.2.1" + } + }, + "node_modules/config-chain/node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmmirror.com/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==" + }, + "node_modules/connect-history-api-fallback": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz", + "integrity": "sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==", + "dev": true, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/console-browserify": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/console-browserify/-/console-browserify-1.2.0.tgz", + "integrity": "sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA==", + "peer": true + }, + "node_modules/console-control-strings": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/console-control-strings/-/console-control-strings-1.1.0.tgz", + "integrity": "sha512-ty/fTekppD2fIwRvnZAVdeOiGd1c7YXEixbgJTNzqcxJWKQnjJ/V1bNEEE6hygpM3WjwHFUVK6HTjWSzV4a8sQ==", + "optional": true + }, + "node_modules/consolidate": { + "version": "0.15.1", + "resolved": "https://registry.npmmirror.com/consolidate/-/consolidate-0.15.1.tgz", + "integrity": "sha512-DW46nrsMJgy9kqAbPt5rKaCr7uFtpo4mSUvLHIUbJEjm0vo+aY5QLwBUq3FK4tRnJr/X0Psc0C4jf/h+HtXSMw==", + "deprecated": "Please upgrade to consolidate v1.0.0+ as it has been modernized with several long-awaited fixes implemented. 
Maintenance is supported by Forward Email at https://forwardemail.net ; follow/watch https://github.com/ladjs/consolidate for updates and release changelog", + "dev": true, + "dependencies": { + "bluebird": "^3.1.1" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/constants-browserify": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/constants-browserify/-/constants-browserify-1.0.0.tgz", + "integrity": "sha512-xFxOwqIzR/e1k1gLiWEophSCMqXcwVHIH7akf7b/vxcUeGunlj3hvZaaqxwHsTgn+IndtkQJgSztIDWeumWJDQ==", + "peer": true + }, + "node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmmirror.com/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmmirror.com/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==" + }, + "node_modules/cookie": { + "version": "0.6.0", + "resolved": "https://registry.npmmirror.com/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.6", + "resolved": "https://registry.npmmirror.com/cookie-signature/-/cookie-signature-1.0.6.tgz", + "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==" + }, + "node_modules/copy-concurrently": { + "version": "1.0.5", + "resolved": "https://registry.npmmirror.com/copy-concurrently/-/copy-concurrently-1.0.5.tgz", + "integrity": "sha512-f2domd9fsVDFtaFcbaRZuYXwtdmnzqbADSwhSWYxYB/Q8zsdUUFMXVRwXGDMWmbEzAn1kdRrtI1T/KTFOL4X2A==", + "peer": true, + "dependencies": { + "aproba": "^1.1.1", + "fs-write-stream-atomic": "^1.0.8", + "iferr": "^0.1.5", + "mkdirp": "^0.5.1", + "rimraf": "^2.5.4", + "run-queue": "^1.0.0" + } + }, + "node_modules/copy-concurrently/node_modules/rimraf": { + "version": "2.7.1", + "resolved": "https://registry.npmmirror.com/rimraf/-/rimraf-2.7.1.tgz", + "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", + "peer": true, + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + } + }, + "node_modules/copy-descriptor": { + "version": "0.1.1", + "resolved": "https://registry.npmmirror.com/copy-descriptor/-/copy-descriptor-0.1.1.tgz", + "integrity": "sha512-XgZ0pFcakEUlbwQEVNg3+QAis1FyTL3Qel9FYy8pSkQqoG3PNoT0bOCQtOXcOkur21r2Eq2kI+IE+gsmAEVlYw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/copy-webpack-plugin": { + "version": "9.1.0", + "resolved": "https://registry.npmmirror.com/copy-webpack-plugin/-/copy-webpack-plugin-9.1.0.tgz", + "integrity": "sha512-rxnR7PaGigJzhqETHGmAcxKnLZSR5u1Y3/bcIv/1FnqXedcL/E2ewK7ZCNrArJKCiSv8yVXhTqetJh8inDvfsA==", + "dev": true, + "dependencies": { + "fast-glob": "^3.2.7", + 
"glob-parent": "^6.0.1", + "globby": "^11.0.3", + "normalize-path": "^3.0.0", + "schema-utils": "^3.1.1", + "serialize-javascript": "^6.0.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + } + }, + "node_modules/copy-webpack-plugin/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmmirror.com/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/core-js": { + "version": "3.37.1", + "resolved": "https://registry.npmmirror.com/core-js/-/core-js-3.37.1.tgz", + "integrity": "sha512-Xn6qmxrQZyB0FFY8E3bgRXei3lWDJHhvI+u0q9TKIYM49G8pAr0FgnnrFRAmsbptZL1yxRADVXn+x5AGsbBfyw==", + "hasInstallScript": true, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-js-compat": { + "version": "3.37.1", + "resolved": "https://registry.npmmirror.com/core-js-compat/-/core-js-compat-3.37.1.tgz", + "integrity": "sha512-9TNiImhKvQqSUkOvk/mMRZzOANTiEVC7WaBNhHcKM7x+/5E1l5NvsysR19zuDQScE8k+kfQXWRN3AtS/eOSHpg==", + "dependencies": { + "browserslist": "^4.23.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmmirror.com/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" + }, + "node_modules/cors": { + "version": "2.8.5", + "resolved": "https://registry.npmmirror.com/cors/-/cors-2.8.5.tgz", + "integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==", + "dependencies": { + "object-assign": "^4", + "vary": "^1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/cosmiconfig": { + "version": "6.0.0", + "resolved": "https://registry.npmmirror.com/cosmiconfig/-/cosmiconfig-6.0.0.tgz", + "integrity": "sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg==", + "dev": true, + "dependencies": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.1.0", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.7.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/create-ecdh": { + "version": "4.0.4", + "resolved": "https://registry.npmmirror.com/create-ecdh/-/create-ecdh-4.0.4.tgz", + "integrity": "sha512-mf+TCx8wWc9VpuxfP2ht0iSISLZnt0JgWlrOKZiNqyUZWnjIaCIVNQArMHnCZKfEYRg6IM7A+NeJoN8gf/Ws0A==", + "peer": true, + "dependencies": { + "bn.js": "^4.1.0", + "elliptic": "^6.5.3" + } + }, + "node_modules/create-ecdh/node_modules/bn.js": { + "version": "4.12.0", + "resolved": "https://registry.npmmirror.com/bn.js/-/bn.js-4.12.0.tgz", + "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==", + "peer": true + }, + "node_modules/create-hash": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/create-hash/-/create-hash-1.2.0.tgz", + "integrity": 
"sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==", + "peer": true, + "dependencies": { + "cipher-base": "^1.0.1", + "inherits": "^2.0.1", + "md5.js": "^1.3.4", + "ripemd160": "^2.0.1", + "sha.js": "^2.4.0" + } + }, + "node_modules/create-hmac": { + "version": "1.1.7", + "resolved": "https://registry.npmmirror.com/create-hmac/-/create-hmac-1.1.7.tgz", + "integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==", + "peer": true, + "dependencies": { + "cipher-base": "^1.0.3", + "create-hash": "^1.1.0", + "inherits": "^2.0.1", + "ripemd160": "^2.0.0", + "safe-buffer": "^5.0.1", + "sha.js": "^2.4.8" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmmirror.com/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/crypto-browserify": { + "version": "3.12.0", + "resolved": "https://registry.npmmirror.com/crypto-browserify/-/crypto-browserify-3.12.0.tgz", + "integrity": "sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==", + "peer": true, + "dependencies": { + "browserify-cipher": "^1.0.0", + "browserify-sign": "^4.0.0", + "create-ecdh": "^4.0.0", + "create-hash": "^1.1.0", + "create-hmac": "^1.1.0", + "diffie-hellman": "^5.0.0", + "inherits": "^2.0.1", + "pbkdf2": "^3.0.3", + "public-encrypt": "^4.0.0", + "randombytes": "^2.0.0", + "randomfill": "^1.0.3" + }, + "engines": { + "node": "*" + } + }, + "node_modules/css": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/css/-/css-3.0.0.tgz", + "integrity": "sha512-DG9pFfwOrzc+hawpmqX/dHYHJG+Bsdb0klhyi1sDneOgGOXy9wQIC8hzyVp1e4NRYDBdxcylvywPkkXCHAzTyQ==", + "dev": true, + "dependencies": { + "inherits": "^2.0.4", + "source-map": "^0.6.1", + "source-map-resolve": "^0.6.0" + } + }, + "node_modules/css-declaration-sorter": { + "version": "6.4.1", + "resolved": "https://registry.npmmirror.com/css-declaration-sorter/-/css-declaration-sorter-6.4.1.tgz", + "integrity": "sha512-rtdthzxKuyq6IzqX6jEcIzQF/YqccluefyCYheovBOLhFT/drQA9zj/UbRAa9J7C0o6EG6u3E6g+vKkay7/k3g==", + "dev": true, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.0.9" + } + }, + "node_modules/css-loader": { + "version": "6.11.0", + "resolved": "https://registry.npmmirror.com/css-loader/-/css-loader-6.11.0.tgz", + "integrity": "sha512-CTJ+AEQJjq5NzLga5pE39qdiSV56F8ywCIsqNIRF0r7BDgWsN25aazToqAFg7ZrtA/U016xudB3ffgweORxX7g==", + "dev": true, + "dependencies": { + "icss-utils": "^5.1.0", + "postcss": "^8.4.33", + "postcss-modules-extract-imports": "^3.1.0", + "postcss-modules-local-by-default": "^4.0.5", + "postcss-modules-scope": "^3.2.0", + "postcss-modules-values": "^4.0.0", + "postcss-value-parser": "^4.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "@rspack/core": "0.x || 1.x", + "webpack": "^5.0.0" + }, + "peerDependenciesMeta": { + "@rspack/core": { + "optional": true + }, + "webpack": { + "optional": true + } + } + }, + "node_modules/css-loader/node_modules/semver": { + "version": "7.6.2", + "resolved": 
"https://registry.npmmirror.com/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/css-minimizer-webpack-plugin": { + "version": "3.4.1", + "resolved": "https://registry.npmmirror.com/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-3.4.1.tgz", + "integrity": "sha512-1u6D71zeIfgngN2XNRJefc/hY7Ybsxd74Jm4qngIXyUEk7fss3VUzuHxLAq/R8NAba4QU9OUSaMZlbpRc7bM4Q==", + "dev": true, + "dependencies": { + "cssnano": "^5.0.6", + "jest-worker": "^27.0.2", + "postcss": "^8.3.5", + "schema-utils": "^4.0.0", + "serialize-javascript": "^6.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + }, + "peerDependenciesMeta": { + "@parcel/css": { + "optional": true + }, + "clean-css": { + "optional": true + }, + "csso": { + "optional": true + }, + "esbuild": { + "optional": true + } + } + }, + "node_modules/css-minimizer-webpack-plugin/node_modules/ajv": { + "version": "8.13.0", + "resolved": "https://registry.npmmirror.com/ajv/-/ajv-8.13.0.tgz", + "integrity": "sha512-PRA911Blj99jR5RMeTunVbNXMF6Lp4vZXnk5GQjcnUWUTsrXtekg/pnmFFI2u/I36Y/2bITGS30GZCXei6uNkA==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.3", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.4.1" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/css-minimizer-webpack-plugin/node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/css-minimizer-webpack-plugin/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "dev": true + }, + "node_modules/css-minimizer-webpack-plugin/node_modules/schema-utils": { + "version": "4.2.0", + "resolved": "https://registry.npmmirror.com/schema-utils/-/schema-utils-4.2.0.tgz", + "integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/css-select": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/css-select/-/css-select-4.3.0.tgz", + "integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==", + "dev": true, + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.0.1", + "domhandler": "^4.3.1", + "domutils": "^2.8.0", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/css-tree": { + 
"version": "2.3.1", + "resolved": "https://registry.npmmirror.com/css-tree/-/css-tree-2.3.1.tgz", + "integrity": "sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw==", + "dev": true, + "dependencies": { + "mdn-data": "2.0.30", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" + } + }, + "node_modules/css-what": { + "version": "6.1.0", + "resolved": "https://registry.npmmirror.com/css-what/-/css-what-6.1.0.tgz", + "integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==", + "dev": true, + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "dev": true, + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/cssfilter": { + "version": "0.0.10", + "resolved": "https://registry.npmmirror.com/cssfilter/-/cssfilter-0.0.10.tgz", + "integrity": "sha512-FAaLDaplstoRsDR8XGYH51znUN0UY7nMc6Z9/fvE8EXGwvJE9hu7W2vHwx1+bd6gCYnln9nLbzxFTrcO9YQDZw==" + }, + "node_modules/cssnano": { + "version": "5.1.15", + "resolved": "https://registry.npmmirror.com/cssnano/-/cssnano-5.1.15.tgz", + "integrity": "sha512-j+BKgDcLDQA+eDifLx0EO4XSA56b7uut3BQFH+wbSaSTuGLuiyTa/wbRYthUXX8LC9mLg+WWKe8h+qJuwTAbHw==", + "dev": true, + "dependencies": { + "cssnano-preset-default": "^5.2.14", + "lilconfig": "^2.0.3", + "yaml": "^1.10.2" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/cssnano" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/cssnano-preset-default": { + "version": "5.2.14", + "resolved": "https://registry.npmmirror.com/cssnano-preset-default/-/cssnano-preset-default-5.2.14.tgz", + "integrity": "sha512-t0SFesj/ZV2OTylqQVOrFgEh5uanxbO6ZAdeCrNsUQ6fVuXwYTxJPNAGvGTxHbD68ldIJNec7PyYZDBrfDQ+6A==", + "dev": true, + "dependencies": { + "css-declaration-sorter": "^6.3.1", + "cssnano-utils": "^3.1.0", + "postcss-calc": "^8.2.3", + "postcss-colormin": "^5.3.1", + "postcss-convert-values": "^5.1.3", + "postcss-discard-comments": "^5.1.2", + "postcss-discard-duplicates": "^5.1.0", + "postcss-discard-empty": "^5.1.1", + "postcss-discard-overridden": "^5.1.0", + "postcss-merge-longhand": "^5.1.7", + "postcss-merge-rules": "^5.1.4", + "postcss-minify-font-values": "^5.1.0", + "postcss-minify-gradients": "^5.1.1", + "postcss-minify-params": "^5.1.4", + "postcss-minify-selectors": "^5.2.1", + "postcss-normalize-charset": "^5.1.0", + "postcss-normalize-display-values": "^5.1.0", + "postcss-normalize-positions": "^5.1.1", + "postcss-normalize-repeat-style": "^5.1.1", + "postcss-normalize-string": "^5.1.0", + "postcss-normalize-timing-functions": "^5.1.0", + "postcss-normalize-unicode": "^5.1.1", + "postcss-normalize-url": "^5.1.0", + "postcss-normalize-whitespace": "^5.1.1", + "postcss-ordered-values": "^5.1.3", + "postcss-reduce-initial": "^5.1.2", + "postcss-reduce-transforms": "^5.1.0", + "postcss-svgo": "^5.1.0", + "postcss-unique-selectors": "^5.1.1" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/cssnano-utils": { + "version": "3.1.0", + "resolved": 
"https://registry.npmmirror.com/cssnano-utils/-/cssnano-utils-3.1.0.tgz", + "integrity": "sha512-JQNR19/YZhz4psLX/rQ9M83e3z2Wf/HdJbryzte4a3NSuafyp9w/I4U+hx5C2S9g41qlstH7DEWnZaaj83OuEA==", + "dev": true, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/csso": { + "version": "4.2.0", + "resolved": "https://registry.npmmirror.com/csso/-/csso-4.2.0.tgz", + "integrity": "sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==", + "dev": true, + "dependencies": { + "css-tree": "^1.1.2" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/csso/node_modules/css-tree": { + "version": "1.1.3", + "resolved": "https://registry.npmmirror.com/css-tree/-/css-tree-1.1.3.tgz", + "integrity": "sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q==", + "dev": true, + "dependencies": { + "mdn-data": "2.0.14", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/csso/node_modules/mdn-data": { + "version": "2.0.14", + "resolved": "https://registry.npmmirror.com/mdn-data/-/mdn-data-2.0.14.tgz", + "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==", + "dev": true + }, + "node_modules/cssom": { + "version": "0.4.4", + "resolved": "https://registry.npmmirror.com/cssom/-/cssom-0.4.4.tgz", + "integrity": "sha512-p3pvU7r1MyyqbTk+WbNJIgJjG2VmTIaB10rI93LzVPrmDJKkzKYMtxxyAvQXR/NS6otuzveI7+7BBq3SjBS2mw==", + "dev": true + }, + "node_modules/cssstyle": { + "version": "2.3.0", + "resolved": "https://registry.npmmirror.com/cssstyle/-/cssstyle-2.3.0.tgz", + "integrity": "sha512-AZL67abkUzIuvcHqk7c09cezpGNcxUxU4Ioi/05xHk4DQeTkWmGYftIE6ctU6AEt+Gn4n1lDStOtj7FKycP71A==", + "dev": true, + "dependencies": { + "cssom": "~0.3.6" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cssstyle/node_modules/cssom": { + "version": "0.3.8", + "resolved": "https://registry.npmmirror.com/cssom/-/cssom-0.3.8.tgz", + "integrity": "sha512-b0tGHbfegbhPJpxpiBPU2sCkigAqtM9O121le6bbOlgyV+NyGyCmVfJ6QW9eRjz8CpNfWEOYBIMIGRYkLwsIYg==", + "dev": true + }, + "node_modules/csstype": { + "version": "3.1.3", + "resolved": "https://registry.npmmirror.com/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==" + }, + "node_modules/cyclist": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/cyclist/-/cyclist-1.0.2.tgz", + "integrity": "sha512-0sVXIohTfLqVIW3kb/0n6IiWF3Ifj5nm2XaSrLq2DI6fKIGa2fYAZdk917rUneaeLVpYfFcyXE2ft0fe3remsA==", + "peer": true + }, + "node_modules/d": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/d/-/d-1.0.2.tgz", + "integrity": "sha512-MOqHvMWF9/9MX6nza0KgvFH4HpMU0EF5uUDXqX/BtxtU8NfB0QzRtJ8Oe/6SuS4kbhyzVJwjd97EA4PKrzJ8bw==", + "dependencies": { + "es5-ext": "^0.10.64", + "type": "^2.7.2" + }, + "engines": { + "node": ">=0.12" + } + }, + "node_modules/data-urls": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/data-urls/-/data-urls-2.0.0.tgz", + "integrity": "sha512-X5eWTSXO/BJmpdIKCRuKUgSCgAN0OwliVK3yPKbwIWU1Tdw5BRajxlzMidvh+gwko9AfQ9zIj52pzF91Q3YAvQ==", + "dev": true, + "dependencies": { + "abab": "^2.0.3", + "whatwg-mimetype": "^2.3.0", + "whatwg-url": "^8.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/data-urls/node_modules/whatwg-mimetype": { + "version": "2.3.0", + "resolved": 
"https://registry.npmmirror.com/whatwg-mimetype/-/whatwg-mimetype-2.3.0.tgz", + "integrity": "sha512-M4yMwr6mAnQz76TbJm914+gPpB/nCwvZbJU28cUD6dR004SAxDLOOSUaB1JDRqLtaOV/vi0IC5lEAGFgrjGv/g==", + "dev": true + }, + "node_modules/dayjs": { + "version": "1.11.11", + "resolved": "https://registry.npmmirror.com/dayjs/-/dayjs-1.11.11.tgz", + "integrity": "sha512-okzr3f11N6WuqYtZSvm+F776mB41wRZMhKP+hc34YdW+KmtYYK9iqvHSwo2k9FEH3fhGXvOPV6yz2IcSrfRUDg==" + }, + "node_modules/debounce": { + "version": "1.2.1", + "resolved": "https://registry.npmmirror.com/debounce/-/debounce-1.2.1.tgz", + "integrity": "sha512-XRRe6Glud4rd/ZGQfiV1ruXSfbvfJedlV9Y6zOlP+2K04vBYiJEte6stfFkCP03aMnY5tsipamumUjL14fofug==", + "dev": true + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmmirror.com/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decimal.js": { + "version": "10.4.3", + "resolved": "https://registry.npmmirror.com/decimal.js/-/decimal.js-10.4.3.tgz", + "integrity": "sha512-VBBaLc1MgL5XpzgIP7ny5Z6Nx3UrRkIViUkPUdtl9aya5amy3De1gsUUSB1g3+3sExYNjCAsAznmukyxCb1GRA==", + "dev": true + }, + "node_modules/decode-uri-component": { + "version": "0.2.2", + "resolved": "https://registry.npmmirror.com/decode-uri-component/-/decode-uri-component-0.2.2.tgz", + "integrity": "sha512-FqUYQ+8o158GyGTrMFJms9qh3CqTKvAqgqsTnkLI8sKu0028orqBhxNMFkFen0zGyg6epACD32pjVk58ngIErQ==", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/decompress": { + "version": "4.2.1", + "resolved": "https://registry.npmmirror.com/decompress/-/decompress-4.2.1.tgz", + "integrity": "sha512-e48kc2IjU+2Zw8cTb6VZcJQ3lgVbS4uuB1TfCHbiZIP/haNXm+SVyhu+87jts5/3ROpd82GSVCoNs/z8l4ZOaQ==", + "dependencies": { + "decompress-tar": "^4.0.0", + "decompress-tarbz2": "^4.0.0", + "decompress-targz": "^4.0.0", + "decompress-unzip": "^4.0.1", + "graceful-fs": "^4.1.10", + "make-dir": "^1.0.0", + "pify": "^2.3.0", + "strip-dirs": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/decompress-response": { + "version": "3.3.0", + "resolved": "https://registry.npmmirror.com/decompress-response/-/decompress-response-3.3.0.tgz", + "integrity": "sha512-BzRPQuY1ip+qDonAOz42gRm/pg9F768C+npV/4JOsxRC2sq+Rlk+Q4ZCAsOhnIaMrgarILY+RMUIvMmmX1qAEA==", + "dependencies": { + "mimic-response": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/decompress-tar": { + "version": "4.1.1", + "resolved": "https://registry.npmmirror.com/decompress-tar/-/decompress-tar-4.1.1.tgz", + "integrity": "sha512-JdJMaCrGpB5fESVyxwpCx4Jdj2AagLmv3y58Qy4GE6HMVjWz1FeVQk1Ct4Kye7PftcdOo/7U7UKzYBJgqnGeUQ==", + "dependencies": { + "file-type": "^5.2.0", + "is-stream": "^1.1.0", + "tar-stream": "^1.5.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/decompress-tar/node_modules/file-type": { + "version": "5.2.0", + "resolved": "https://registry.npmmirror.com/file-type/-/file-type-5.2.0.tgz", + "integrity": "sha512-Iq1nJ6D2+yIO4c8HHg4fyVb8mAJieo1Oloy1mLLaB2PvezNedhBVm+QU7g0qM42aiMbRXTxKKwGD17rjKNJYVQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/decompress-tarbz2": { + "version": "4.1.1", + "resolved": "https://registry.npmmirror.com/decompress-tarbz2/-/decompress-tarbz2-4.1.1.tgz", + "integrity": 
"sha512-s88xLzf1r81ICXLAVQVzaN6ZmX4A6U4z2nMbOwobxkLoIIfjVMBg7TeguTUXkKeXni795B6y5rnvDw7rxhAq9A==", + "dependencies": { + "decompress-tar": "^4.1.0", + "file-type": "^6.1.0", + "is-stream": "^1.1.0", + "seek-bzip": "^1.0.5", + "unbzip2-stream": "^1.0.9" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/decompress-tarbz2/node_modules/file-type": { + "version": "6.2.0", + "resolved": "https://registry.npmmirror.com/file-type/-/file-type-6.2.0.tgz", + "integrity": "sha512-YPcTBDV+2Tm0VqjybVd32MHdlEGAtuxS3VAYsumFokDSMG+ROT5wawGlnHDoz7bfMcMDt9hxuXvXwoKUx2fkOg==", + "engines": { + "node": ">=4" + } + }, + "node_modules/decompress-targz": { + "version": "4.1.1", + "resolved": "https://registry.npmmirror.com/decompress-targz/-/decompress-targz-4.1.1.tgz", + "integrity": "sha512-4z81Znfr6chWnRDNfFNqLwPvm4db3WuZkqV+UgXQzSngG3CEKdBkw5jrv3axjjL96glyiiKjsxJG3X6WBZwX3w==", + "dependencies": { + "decompress-tar": "^4.1.1", + "file-type": "^5.2.0", + "is-stream": "^1.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/decompress-targz/node_modules/file-type": { + "version": "5.2.0", + "resolved": "https://registry.npmmirror.com/file-type/-/file-type-5.2.0.tgz", + "integrity": "sha512-Iq1nJ6D2+yIO4c8HHg4fyVb8mAJieo1Oloy1mLLaB2PvezNedhBVm+QU7g0qM42aiMbRXTxKKwGD17rjKNJYVQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/decompress-unzip": { + "version": "4.0.1", + "resolved": "https://registry.npmmirror.com/decompress-unzip/-/decompress-unzip-4.0.1.tgz", + "integrity": "sha512-1fqeluvxgnn86MOh66u8FjbtJpAFv5wgCT9Iw8rcBqQcCo5tO8eiJw7NNTrvt9n4CRBVq7CstiS922oPgyGLrw==", + "dependencies": { + "file-type": "^3.8.0", + "get-stream": "^2.2.0", + "pify": "^2.3.0", + "yauzl": "^2.4.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/decompress-unzip/node_modules/file-type": { + "version": "3.9.0", + "resolved": "https://registry.npmmirror.com/file-type/-/file-type-3.9.0.tgz", + "integrity": "sha512-RLoqTXE8/vPmMuTI88DAzhMYC99I8BWv7zYP4A1puo5HIjEJ5EX48ighy4ZyKMG9EDXxBgW6e++cn7d1xuFghA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/decompress-unzip/node_modules/get-stream": { + "version": "2.3.1", + "resolved": "https://registry.npmmirror.com/get-stream/-/get-stream-2.3.1.tgz", + "integrity": "sha512-AUGhbbemXxrZJRD5cDvKtQxLuYaIbNtDTK8YqupCI393Q2KSTreEsLUN3ZxAWFGiKTzL6nKuzfcIvieflUX9qA==", + "dependencies": { + "object-assign": "^4.0.1", + "pinkie-promise": "^2.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/decompress-unzip/node_modules/pify": { + "version": "2.3.0", + "resolved": "https://registry.npmmirror.com/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/decompress/node_modules/make-dir": { + "version": "1.3.0", + "resolved": "https://registry.npmmirror.com/make-dir/-/make-dir-1.3.0.tgz", + "integrity": "sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==", + "dependencies": { + "pify": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/decompress/node_modules/make-dir/node_modules/pify": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/pify/-/pify-3.0.0.tgz", + "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==", + "engines": { + "node": ">=4" + } + }, + "node_modules/decompress/node_modules/pify": { + "version": "2.3.0", + "resolved": 
"https://registry.npmmirror.com/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/dedent": { + "version": "0.7.0", + "resolved": "https://registry.npmmirror.com/dedent/-/dedent-0.7.0.tgz", + "integrity": "sha512-Q6fKUPqnAHAyhiUgFU7BUzLiv0kd8saH9al7tnu5Q/okj6dnupxyTgFIBjVzJATdfIAm9NAsvXNzjaKa+bxVyA==", + "dev": true + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmmirror.com/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmmirror.com/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/default-gateway": { + "version": "6.0.3", + "resolved": "https://registry.npmmirror.com/default-gateway/-/default-gateway-6.0.3.tgz", + "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==", + "dev": true, + "dependencies": { + "execa": "^5.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/default-gateway/node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmmirror.com/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/default-gateway/node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/default-gateway/node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/default-gateway/node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmmirror.com/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/defaults": { + "version": "1.0.4", + "resolved": "https://registry.npmmirror.com/defaults/-/defaults-1.0.4.tgz", + "integrity": "sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==", + "dependencies": { + "clone": "^1.0.2" + }, + "funding": { 
+ "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/defaults/node_modules/clone": { + "version": "1.0.4", + "resolved": "https://registry.npmmirror.com/clone/-/clone-1.0.4.tgz", + "integrity": "sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-lazy-prop": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", + "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", + "engines": { + "node": ">=8" + } + }, + "node_modules/define-properties": { + "version": "1.2.1", + "resolved": "https://registry.npmmirror.com/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "peer": true, + "dependencies": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-property": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/define-property/-/define-property-2.0.2.tgz", + "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==", + "dependencies": { + "is-descriptor": "^1.0.2", + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/delegates": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/delegates/-/delegates-1.0.0.tgz", + "integrity": "sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==", + "optional": true + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/des.js": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/des.js/-/des.js-1.1.0.tgz", + "integrity": "sha512-r17GxjhUCjSRy8aiJpr8/UadFIzMzJGexI3Nmz4ADi9LYSFx4gTBp80+NaX/YsXWWLhpZ7v/v/ubEc/bCNfKwg==", + "peer": true, + "dependencies": { + "inherits": "^2.0.1", + "minimalistic-assert": "^1.0.0" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/destroy/-/destroy-1.2.0.tgz", + "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 
1.4.16" + } + }, + "node_modules/detect-libc": { + "version": "2.0.3", + "resolved": "https://registry.npmmirror.com/detect-libc/-/detect-libc-2.0.3.tgz", + "integrity": "sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==", + "optional": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/detect-newline": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/detect-newline/-/detect-newline-3.1.0.tgz", + "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/detect-node": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/detect-node/-/detect-node-2.1.0.tgz", + "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==", + "dev": true + }, + "node_modules/diff-sequences": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/diff-sequences/-/diff-sequences-27.5.1.tgz", + "integrity": "sha512-k1gCAXAsNgLwEL+Y8Wvl+M6oEFj5bgazfZULpS5CneoPPXRaCCW7dm+q21Ky2VEE5X+VeRDBVg1Pcvvsr4TtNQ==", + "dev": true, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/diffie-hellman": { + "version": "5.0.3", + "resolved": "https://registry.npmmirror.com/diffie-hellman/-/diffie-hellman-5.0.3.tgz", + "integrity": "sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==", + "peer": true, + "dependencies": { + "bn.js": "^4.1.0", + "miller-rabin": "^4.0.0", + "randombytes": "^2.0.0" + } + }, + "node_modules/diffie-hellman/node_modules/bn.js": { + "version": "4.12.0", + "resolved": "https://registry.npmmirror.com/bn.js/-/bn.js-4.12.0.tgz", + "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==", + "peer": true + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/dns-packet": { + "version": "5.6.1", + "resolved": "https://registry.npmmirror.com/dns-packet/-/dns-packet-5.6.1.tgz", + "integrity": "sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==", + "dev": true, + "dependencies": { + "@leichtgewicht/ip-codec": "^2.0.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/dom-align": { + "version": "1.12.4", + "resolved": "https://registry.npmmirror.com/dom-align/-/dom-align-1.12.4.tgz", + "integrity": "sha512-R8LUSEay/68zE5c8/3BDxiTEvgb4xZTF0RKmAHfiEVN3klfIpXfi2/QCoiWPccVQ0J/ZGdz9OjzL4uJEP/MRAw==" + }, + "node_modules/dom-converter": { + "version": "0.2.0", + "resolved": "https://registry.npmmirror.com/dom-converter/-/dom-converter-0.2.0.tgz", + "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", + "dev": true, + "dependencies": { + "utila": "~0.4" + } 
+ }, + "node_modules/dom-scroll-into-view": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/dom-scroll-into-view/-/dom-scroll-into-view-2.0.1.tgz", + "integrity": "sha512-bvVTQe1lfaUr1oFzZX80ce9KLDlZ3iU+XGNE/bz9HnGdklTieqsbmsLHe+rT2XWqopvL0PckkYqN7ksmm5pe3w==" + }, + "node_modules/dom-serializer": { + "version": "1.4.1", + "resolved": "https://registry.npmmirror.com/dom-serializer/-/dom-serializer-1.4.1.tgz", + "integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==", + "dev": true, + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.2.0", + "entities": "^2.0.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/domain-browser": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/domain-browser/-/domain-browser-1.2.0.tgz", + "integrity": "sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA==", + "peer": true, + "engines": { + "node": ">=0.4", + "npm": ">=1.2" + } + }, + "node_modules/domelementtype": { + "version": "2.3.0", + "resolved": "https://registry.npmmirror.com/domelementtype/-/domelementtype-2.3.0.tgz", + "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ] + }, + "node_modules/domexception": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/domexception/-/domexception-2.0.1.tgz", + "integrity": "sha512-yxJ2mFy/sibVQlu5qHjOkf9J3K6zgmCxgJ94u2EdvDOV09H+32LtRswEcUsmUWN72pVLOEnTSRaIVVzVQgS0dg==", + "deprecated": "Use your platform's native DOMException instead", + "dev": true, + "dependencies": { + "webidl-conversions": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/domexception/node_modules/webidl-conversions": { + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/webidl-conversions/-/webidl-conversions-5.0.0.tgz", + "integrity": "sha512-VlZwKPCkYKxQgeSbH5EyngOmRp7Ww7I9rQLERETtf5ofd9pGeswWiOtogpEO850jziPRarreGxn5QIiTqpb2wA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/domhandler": { + "version": "4.3.1", + "resolved": "https://registry.npmmirror.com/domhandler/-/domhandler-4.3.1.tgz", + "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==", + "dev": true, + "dependencies": { + "domelementtype": "^2.2.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/domutils": { + "version": "2.8.0", + "resolved": "https://registry.npmmirror.com/domutils/-/domutils-2.8.0.tgz", + "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", + "dev": true, + "dependencies": { + "dom-serializer": "^1.0.1", + "domelementtype": "^2.2.0", + "domhandler": "^4.2.0" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/dot-case": { + "version": "3.0.4", + "resolved": "https://registry.npmmirror.com/dot-case/-/dot-case-3.0.4.tgz", + "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==", + "dev": true, + "dependencies": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/dotenv": { + "version": "10.0.0", + "resolved": 
"https://registry.npmmirror.com/dotenv/-/dotenv-10.0.0.tgz", + "integrity": "sha512-rlBi9d8jpv9Sf1klPjNfFAuWDjKLwTIJJ/VxtoTwIR6hnZxcEOQCZg2oIL3MWBYw5GpUDKOEnND7LXTbIpQ03Q==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/dotenv-expand": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/dotenv-expand/-/dotenv-expand-5.1.0.tgz", + "integrity": "sha512-YXQl1DSa4/PQyRfgrv6aoNjhasp/p4qs9FjJ4q4cQk+8m4r6k4ZSiEyytKG8f8W9gi8WsQtIObNmKd+tMzNTmA==", + "dev": true + }, + "node_modules/download": { + "version": "7.1.0", + "resolved": "https://registry.npmmirror.com/download/-/download-7.1.0.tgz", + "integrity": "sha512-xqnBTVd/E+GxJVrX5/eUJiLYjCGPwMpdL+jGhGU57BvtcA7wwhtHVbXBeUk51kOpW3S7Jn3BQbN9Q1R1Km2qDQ==", + "dependencies": { + "archive-type": "^4.0.0", + "caw": "^2.0.1", + "content-disposition": "^0.5.2", + "decompress": "^4.2.0", + "ext-name": "^5.0.0", + "file-type": "^8.1.0", + "filenamify": "^2.0.0", + "get-stream": "^3.0.0", + "got": "^8.3.1", + "make-dir": "^1.2.0", + "p-event": "^2.1.0", + "pify": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/download-git-repo": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/download-git-repo/-/download-git-repo-3.0.2.tgz", + "integrity": "sha512-N8hWXD4hXqmEcNoR8TBYFntaOcYvEQ7Bz90mgm3bZRTuteGQqwT32VDMnTyD0KTEvb8BWrMc1tVmzuV9u/WrAg==", + "dependencies": { + "download": "^7.1.0", + "git-clone": "^0.1.0", + "rimraf": "^3.0.0" + } + }, + "node_modules/download/node_modules/make-dir": { + "version": "1.3.0", + "resolved": "https://registry.npmmirror.com/make-dir/-/make-dir-1.3.0.tgz", + "integrity": "sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==", + "dependencies": { + "pify": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/duplexer": { + "version": "0.1.2", + "resolved": "https://registry.npmmirror.com/duplexer/-/duplexer-0.1.2.tgz", + "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==", + "dev": true + }, + "node_modules/duplexer3": { + "version": "0.1.5", + "resolved": "https://registry.npmmirror.com/duplexer3/-/duplexer3-0.1.5.tgz", + "integrity": "sha512-1A8za6ws41LQgv9HrE/66jyC5yuSjQ3L/KOpFtoBilsAK2iA2wuS5rTt1OCzIvtS2V7nVmedsUU+DGRcjBmOYA==" + }, + "node_modules/duplexify": { + "version": "3.7.1", + "resolved": "https://registry.npmmirror.com/duplexify/-/duplexify-3.7.1.tgz", + "integrity": "sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==", + "peer": true, + "dependencies": { + "end-of-stream": "^1.0.0", + "inherits": "^2.0.1", + "readable-stream": "^2.0.0", + "stream-shift": "^1.0.0" + } + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmmirror.com/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "dev": true + }, + "node_modules/easy-stack": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/easy-stack/-/easy-stack-1.0.1.tgz", + "integrity": "sha512-wK2sCs4feiiJeFXn3zvY0p41mdU5VUgbgs1rNsc/y5ngFUijdWd+iIN8eoyuZHKB8xN6BL4PdWmzqFmxNg6V2w==", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/editorconfig": { + "version": "1.0.4", + "resolved": "https://registry.npmmirror.com/editorconfig/-/editorconfig-1.0.4.tgz", + "integrity": 
"sha512-L9Qe08KWTlqYMVvMcTIvMAdl1cDUubzRNYL+WfA4bLDMHe4nemKkpmYzkznE1FwLKu0EEmy6obgQKzMJrg4x9Q==", + "dev": true, + "dependencies": { + "@one-ini/wasm": "0.1.1", + "commander": "^10.0.0", + "minimatch": "9.0.1", + "semver": "^7.5.3" + }, + "bin": { + "editorconfig": "bin/editorconfig" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/editorconfig/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/editorconfig/node_modules/commander": { + "version": "10.0.1", + "resolved": "https://registry.npmmirror.com/commander/-/commander-10.0.1.tgz", + "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", + "dev": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/editorconfig/node_modules/minimatch": { + "version": "9.0.1", + "resolved": "https://registry.npmmirror.com/minimatch/-/minimatch-9.0.1.tgz", + "integrity": "sha512-0jWhJpD/MdhPXwPuiRkCbfYfSKp2qnn2eOc279qI7f+osl/l+prKSrvhg157zSYvx/1nmgn2NqdT6k2Z7zSH9w==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/editorconfig/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmmirror.com/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" + }, + "node_modules/ejs": { + "version": "3.1.10", + "resolved": "https://registry.npmmirror.com/ejs/-/ejs-3.1.10.tgz", + "integrity": "sha512-UeJmFfOrAQS8OJWPZ4qtgHyWExa088/MtK5UEyoJGFH67cDEXkZSviOiKRCZ4Xij0zxI3JECgYs3oKx+AizQBA==", + "dependencies": { + "jake": "^10.8.5" + }, + "bin": { + "ejs": "bin/cli.js" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.4.773", + "resolved": "https://registry.npmmirror.com/electron-to-chromium/-/electron-to-chromium-1.4.773.tgz", + "integrity": "sha512-87eHF+h3PlCRwbxVEAw9KtK3v7lWfc/sUDr0W76955AdYTG4bV/k0zrl585Qnj/skRMH2qOSiE+kqMeOQ+LOpw==" + }, + "node_modules/element-plus": { + "version": "2.7.3", + "resolved": "https://registry.npmmirror.com/element-plus/-/element-plus-2.7.3.tgz", + "integrity": "sha512-OaqY1kQ2xzNyRFyge3fzM7jqMwux+464RBEqd+ybRV9xPiGxtgnj/sVK4iEbnKnzQIa9XK03DOIFzoToUhu1DA==", + "dependencies": { + "@ctrl/tinycolor": "^3.4.1", + "@element-plus/icons-vue": "^2.3.1", + "@floating-ui/dom": "^1.0.1", + "@popperjs/core": "npm:@sxzz/popperjs-es@^2.11.7", + "@types/lodash": "^4.14.182", + "@types/lodash-es": "^4.17.6", + "@vueuse/core": "^9.1.0", + "async-validator": "^4.2.5", + "dayjs": "^1.11.3", + "escape-html": "^1.0.3", + "lodash": "^4.17.21", + "lodash-es": "^4.17.21", + "lodash-unified": "^1.0.2", + "memoize-one": "^6.0.0", + "normalize-wheel-es": "^1.2.0" + }, + "peerDependencies": { + "vue": "^3.2.0" + } + }, + 
"node_modules/elliptic": { + "version": "6.5.5", + "resolved": "https://registry.npmmirror.com/elliptic/-/elliptic-6.5.5.tgz", + "integrity": "sha512-7EjbcmUm17NQFu4Pmgmq2olYMj8nwMnpcddByChSUjArp8F5DQWcIcpriwO4ZToLNAJig0yiyjswfyGNje/ixw==", + "peer": true, + "dependencies": { + "bn.js": "^4.11.9", + "brorand": "^1.1.0", + "hash.js": "^1.0.0", + "hmac-drbg": "^1.0.1", + "inherits": "^2.0.4", + "minimalistic-assert": "^1.0.1", + "minimalistic-crypto-utils": "^1.0.1" + } + }, + "node_modules/elliptic/node_modules/bn.js": { + "version": "4.12.0", + "resolved": "https://registry.npmmirror.com/bn.js/-/bn.js-4.12.0.tgz", + "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==", + "peer": true + }, + "node_modules/emittery": { + "version": "0.8.1", + "resolved": "https://registry.npmmirror.com/emittery/-/emittery-0.8.1.tgz", + "integrity": "sha512-uDfvUjVrfGJJhymx/kz6prltenw1u7WrCg1oa94zYY8xxVpLLUu045LAT0dhDZdXG58/EpPL/5kA180fQ/qudg==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/emittery?sponsor=1" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmmirror.com/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/emojis-list": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/emojis-list/-/emojis-list-3.0.0.tgz", + "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/end-of-stream": { + "version": "1.4.4", + "resolved": "https://registry.npmmirror.com/end-of-stream/-/end-of-stream-1.4.4.tgz", + "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "dependencies": { + "once": "^1.4.0" + } + }, + "node_modules/enhanced-resolve": { + "version": "5.16.1", + "resolved": "https://registry.npmmirror.com/enhanced-resolve/-/enhanced-resolve-5.16.1.tgz", + "integrity": "sha512-4U5pNsuDl0EhuZpq46M5xPslstkviJuhrdobaRDBk2Jy2KO37FDAJl4lb2KlNabxT0m4MTK2UHNrsAcphE8nyw==", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/enhanced-resolve/node_modules/tapable": { + "version": "2.2.1", + "resolved": "https://registry.npmmirror.com/tapable/-/tapable-2.2.1.tgz", + "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/enquirer": { + "version": "2.4.1", + "resolved": "https://registry.npmmirror.com/enquirer/-/enquirer-2.4.1.tgz", + "integrity": "sha512-rRqJg/6gd538VHvR3PSrdRBb/1Vy2YfzHqzvbhGIQpDRKIa4FgV/54b5Q1xYSxOOwKvjXweS26E0Q+nAMwp2pQ==", + "dev": true, + "dependencies": { + "ansi-colors": "^4.1.1", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/entities": { + "version": "2.2.0", + "resolved": "https://registry.npmmirror.com/entities/-/entities-2.2.0.tgz", + "integrity": 
"sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/envinfo": { + "version": "7.13.0", + "resolved": "https://registry.npmmirror.com/envinfo/-/envinfo-7.13.0.tgz", + "integrity": "sha512-cvcaMr7KqXVh4nyzGTVqTum+gAiL265x5jUWQIDLq//zOGbW+gSW/C+OWLleY/rs9Qole6AZLMXPbtIFQbqu+Q==", + "bin": { + "envinfo": "dist/cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/errno": { + "version": "0.1.8", + "resolved": "https://registry.npmmirror.com/errno/-/errno-0.1.8.tgz", + "integrity": "sha512-dJ6oBr5SQ1VSd9qkk7ByRgb/1SH4JZjCHSW/mr63/QcXO9zLVxvJ6Oy13nio03rxpSnVDDjFor75SjVeZWPW/A==", + "peer": true, + "dependencies": { + "prr": "~1.0.1" + }, + "bin": { + "errno": "cli.js" + } + }, + "node_modules/error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmmirror.com/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/error-stack-parser": { + "version": "2.1.4", + "resolved": "https://registry.npmmirror.com/error-stack-parser/-/error-stack-parser-2.1.4.tgz", + "integrity": "sha512-Sk5V6wVazPhq5MhpO+AUxJn5x7XSXGl1R93Vn7i+zS15KDVxQijejNCrz8340/2bgLBjR9GtEG8ZVKONDjcqGQ==", + "dev": true, + "dependencies": { + "stackframe": "^1.3.4" + } + }, + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmmirror.com/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-module-lexer": { + "version": "1.5.2", + "resolved": "https://registry.npmmirror.com/es-module-lexer/-/es-module-lexer-1.5.2.tgz", + "integrity": "sha512-l60ETUTmLqbVbVHv1J4/qj+M8nq7AwMzEcg3kmJDt9dCNrTk+yHcYFf/Kw75pMDwd9mPcIGCG5LcS20SxYRzFA==" + }, + "node_modules/es5-ext": { + "version": "0.10.64", + "resolved": "https://registry.npmmirror.com/es5-ext/-/es5-ext-0.10.64.tgz", + "integrity": "sha512-p2snDhiLaXe6dahss1LddxqEm+SkuDvV8dnIQG0MWjyHpcMNfXKPE+/Cc0y+PhxJX3A4xGNeFCj5oc0BUh6deg==", + "hasInstallScript": true, + "dependencies": { + "es6-iterator": "^2.0.3", + "es6-symbol": "^3.1.3", + "esniff": "^2.0.1", + "next-tick": "^1.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/es6-iterator": { + "version": "2.0.3", + "resolved": "https://registry.npmmirror.com/es6-iterator/-/es6-iterator-2.0.3.tgz", + "integrity": "sha512-zw4SRzoUkd+cl+ZoE15A9o1oQd920Bb0iOJMQkQhl3jNc03YqVjAhG7scf9C5KWRU/R13Orf588uCC6525o02g==", + "dependencies": { + "d": "1", + "es5-ext": "^0.10.35", + "es6-symbol": "^3.1.1" + } + }, + "node_modules/es6-symbol": { + "version": "3.1.4", + "resolved": "https://registry.npmmirror.com/es6-symbol/-/es6-symbol-3.1.4.tgz", + "integrity": "sha512-U9bFFjX8tFiATgtkJ1zg25+KviIXpgRvRHS8sau3GfhVzThRQrOeksPeT0BWW2MNZs1OEWJ1DPXOQMn0KKRkvg==", + "dependencies": { + "d": "^1.0.2", + "ext": "^1.7.0" + }, + "engines": { + "node": ">=0.12" + } + }, + 
"node_modules/escalade": { + "version": "3.1.2", + "resolved": "https://registry.npmmirror.com/escalade/-/escalade-3.1.2.tgz", + "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmmirror.com/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" + }, + "node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmmirror.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/escodegen": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/escodegen/-/escodegen-2.1.0.tgz", + "integrity": "sha512-2NlIDTwUWJN0mRPQOdtQBzbUHvdGY2P1VXSyU83Q3xKxM7WHX2Ql8dKq782Q9TgQUNOLEzEYu9bzLNj1q88I5w==", + "dev": true, + "dependencies": { + "esprima": "^4.0.1", + "estraverse": "^5.2.0", + "esutils": "^2.0.2" + }, + "bin": { + "escodegen": "bin/escodegen.js", + "esgenerate": "bin/esgenerate.js" + }, + "engines": { + "node": ">=6.0" + }, + "optionalDependencies": { + "source-map": "~0.6.1" + } + }, + "node_modules/escodegen/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmmirror.com/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/eslint": { + "version": "7.32.0", + "resolved": "https://registry.npmmirror.com/eslint/-/eslint-7.32.0.tgz", + "integrity": "sha512-VHZ8gX+EDfz+97jGcgyGCyRia/dPOd6Xh9yPv8Bl1+SoaIwD+a/vlrOmGRUyOYu7MwUhc7CxqeaDZU13S4+EpA==", + "dev": true, + "dependencies": { + "@babel/code-frame": "7.12.11", + "@eslint/eslintrc": "^0.4.3", + "@humanwhocodes/config-array": "^0.5.0", + "ajv": "^6.10.0", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.0.1", + "doctrine": "^3.0.0", + "enquirer": "^2.3.5", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^5.1.1", + "eslint-utils": "^2.1.0", + "eslint-visitor-keys": "^2.0.0", + "espree": "^7.3.1", + "esquery": "^1.4.0", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "functional-red-black-tree": "^1.0.1", + "glob-parent": "^5.1.2", + "globals": "^13.6.0", + "ignore": "^4.0.6", + "import-fresh": "^3.0.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "js-yaml": "^3.13.1", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.0.4", + "natural-compare": "^1.4.0", + "optionator": "^0.9.1", + "progress": "^2.0.0", + "regexpp": "^3.1.0", + "semver": "^7.2.1", + "strip-ansi": "^6.0.0", + "strip-json-comments": "^3.1.0", + "table": "^6.0.9", + "text-table": "^0.2.0", + "v8-compile-cache": "^2.0.3" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-plugin-vue": { + "version": "8.7.1", + "resolved": "https://registry.npmmirror.com/eslint-plugin-vue/-/eslint-plugin-vue-8.7.1.tgz", + "integrity": 
"sha512-28sbtm4l4cOzoO1LtzQPxfxhQABararUb1JtqusQqObJpWX2e/gmVyeYVfepizPFne0Q5cILkYGiBoV36L12Wg==", + "dev": true, + "dependencies": { + "eslint-utils": "^3.0.0", + "natural-compare": "^1.4.0", + "nth-check": "^2.0.1", + "postcss-selector-parser": "^6.0.9", + "semver": "^7.3.5", + "vue-eslint-parser": "^8.0.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "eslint": "^6.2.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/eslint-plugin-vue/node_modules/eslint-utils": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/eslint-utils/-/eslint-utils-3.0.0.tgz", + "integrity": "sha512-uuQC43IGctw68pJA1RgbQS8/NP7rch6Cwd4j3ZBtgo4/8Flj4eGE7ZYSZRN3iq5pVUv6GPdW5Z1RFleo84uLDA==", + "dev": true, + "dependencies": { + "eslint-visitor-keys": "^2.0.0" + }, + "engines": { + "node": "^10.0.0 || ^12.0.0 || >= 14.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/mysticatea" + }, + "peerDependencies": { + "eslint": ">=5" + } + }, + "node_modules/eslint-plugin-vue/node_modules/eslint-visitor-keys": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz", + "integrity": "sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/eslint-plugin-vue/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmmirror.com/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/eslint-utils": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/eslint-utils/-/eslint-utils-2.1.0.tgz", + "integrity": "sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg==", + "dev": true, + "dependencies": { + "eslint-visitor-keys": "^1.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/mysticatea" + } + }, + "node_modules/eslint-utils/node_modules/eslint-visitor-keys": { + "version": "1.3.0", + "resolved": "https://registry.npmmirror.com/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", + "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmmirror.com/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-webpack-plugin": { + "version": "3.2.0", + "resolved": "https://registry.npmmirror.com/eslint-webpack-plugin/-/eslint-webpack-plugin-3.2.0.tgz", + "integrity": 
"sha512-avrKcGncpPbPSUHX6B3stNGzkKFto3eL+DKM4+VyMrVnhPc3vRczVlCq3uhuFOdRvDHTVXuzwk1ZKUrqDQHQ9w==", + "dev": true, + "dependencies": { + "@types/eslint": "^7.29.0 || ^8.4.1", + "jest-worker": "^28.0.2", + "micromatch": "^4.0.5", + "normalize-path": "^3.0.0", + "schema-utils": "^4.0.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0", + "webpack": "^5.0.0" + } + }, + "node_modules/eslint-webpack-plugin/node_modules/ajv": { + "version": "8.13.0", + "resolved": "https://registry.npmmirror.com/ajv/-/ajv-8.13.0.tgz", + "integrity": "sha512-PRA911Blj99jR5RMeTunVbNXMF6Lp4vZXnk5GQjcnUWUTsrXtekg/pnmFFI2u/I36Y/2bITGS30GZCXei6uNkA==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.3", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.4.1" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/eslint-webpack-plugin/node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/eslint-webpack-plugin/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/eslint-webpack-plugin/node_modules/jest-worker": { + "version": "28.1.3", + "resolved": "https://registry.npmmirror.com/jest-worker/-/jest-worker-28.1.3.tgz", + "integrity": "sha512-CqRA220YV/6jCo8VWvAt1KKx6eek1VIHMPeLEbpcfSfkEeWyBNppynM/o6q+Wmw+sOhos2ml34wZbSX3G13//g==", + "dev": true, + "dependencies": { + "@types/node": "*", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || ^16.10.0 || >=17.0.0" + } + }, + "node_modules/eslint-webpack-plugin/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "dev": true + }, + "node_modules/eslint-webpack-plugin/node_modules/schema-utils": { + "version": "4.2.0", + "resolved": "https://registry.npmmirror.com/schema-utils/-/schema-utils-4.2.0.tgz", + "integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/eslint-webpack-plugin/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { 
+ "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/eslint/node_modules/@babel/code-frame": { + "version": "7.12.11", + "resolved": "https://registry.npmmirror.com/@babel/code-frame/-/code-frame-7.12.11.tgz", + "integrity": "sha512-Zt1yodBx1UcyiePMSkWnU4hPqhwq7hGi2nFL1LeA3EUl+q2LQx16MISgJ0+z7dnmgvP9QtIleuETGOiOH1RcIw==", + "dev": true, + "dependencies": { + "@babel/highlight": "^7.10.4" + } + }, + "node_modules/eslint/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/eslint/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmmirror.com/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/eslint/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/eslint/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/eslint/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/eslint/node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/eslint-visitor-keys": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz", + "integrity": "sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/eslint/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmmirror.com/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, 
+ "node_modules/eslint/node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmmirror.com/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/eslint/node_modules/ignore": { + "version": "4.0.6", + "resolved": "https://registry.npmmirror.com/ignore/-/ignore-4.0.6.tgz", + "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/eslint/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmmirror.com/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dev": true, + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/eslint/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/eslint/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/esniff": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/esniff/-/esniff-2.0.1.tgz", + "integrity": "sha512-kTUIGKQ/mDPFoJ0oVfcmyJn4iBDRptjNVIzwIFR7tqWXdVI9xfA2RMwY/gbSpJG3lkdWNEjLap/NqVHZiJsdfg==", + "dependencies": { + "d": "^1.0.1", + "es5-ext": "^0.10.62", + "event-emitter": "^0.3.5", + "type": "^2.7.2" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/espree": { + "version": "7.3.1", + "resolved": "https://registry.npmmirror.com/espree/-/espree-7.3.1.tgz", + "integrity": "sha512-v3JCNCE64umkFpmkFGqzVKsOT0tN1Zr+ueqLZfpV1Ob8e+CEgPWa+OxCoGH3tnhimMKIaBm4m/vaRpJ/krRz2g==", + "dev": true, + "dependencies": { + "acorn": "^7.4.0", + "acorn-jsx": "^5.3.1", + "eslint-visitor-keys": "^1.3.0" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/espree/node_modules/acorn": { + "version": "7.4.1", + "resolved": "https://registry.npmmirror.com/acorn/-/acorn-7.4.1.tgz", + "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==", + "dev": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/espree/node_modules/eslint-visitor-keys": { + "version": "1.3.0", + "resolved": "https://registry.npmmirror.com/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", 
+ "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmmirror.com/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esquery": { + "version": "1.5.0", + "resolved": "https://registry.npmmirror.com/esquery/-/esquery-1.5.0.tgz", + "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", + "dev": true, + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esquery/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmmirror.com/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esrecurse/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmmirror.com/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estree-walker": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/estree-walker/-/estree-walker-2.0.2.tgz", + "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==" + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmmirror.com/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmmirror.com/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/event-emitter": { + "version": "0.3.5", + "resolved": "https://registry.npmmirror.com/event-emitter/-/event-emitter-0.3.5.tgz", + "integrity": "sha512-D9rRn9y7kLPnJ+hMq7S/nhvoKwwvVJahBi2BPmx3bvbsEdK3W9ii8cBSGjP+72/LnM4n6fo3+dkCX5FeTQruXA==", + "dependencies": { + "d": "1", + "es5-ext": "~0.10.14" + } + }, + "node_modules/event-pubsub": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/event-pubsub/-/event-pubsub-4.3.0.tgz", + "integrity": "sha512-z7IyloorXvKbFx9Bpie2+vMJKKx1fH1EN5yiTfp8CiLOTptSYy1g8H4yDpGlEdshL1PBiFtBHepF2cNsqeEeFQ==", + 
"engines": { + "node": ">=4.0.0" + } + }, + "node_modules/eventemitter3": { + "version": "3.1.2", + "resolved": "https://registry.npmmirror.com/eventemitter3/-/eventemitter3-3.1.2.tgz", + "integrity": "sha512-tvtQIeLVHjDkJYnzf2dgVMxfuSGJeM/7UCG17TT4EumTfNtF+0nebF/4zWOIkCreAbtNqhGEboB6BWrwqNaw4Q==" + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmmirror.com/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/evp_bytestokey": { + "version": "1.0.3", + "resolved": "https://registry.npmmirror.com/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz", + "integrity": "sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==", + "peer": true, + "dependencies": { + "md5.js": "^1.3.4", + "safe-buffer": "^5.1.1" + } + }, + "node_modules/execa": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/execa/-/execa-1.0.0.tgz", + "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==", + "dependencies": { + "cross-spawn": "^6.0.0", + "get-stream": "^4.0.0", + "is-stream": "^1.1.0", + "npm-run-path": "^2.0.0", + "p-finally": "^1.0.0", + "signal-exit": "^3.0.0", + "strip-eof": "^1.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/execa/node_modules/cross-spawn": { + "version": "6.0.5", + "resolved": "https://registry.npmmirror.com/cross-spawn/-/cross-spawn-6.0.5.tgz", + "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "dependencies": { + "nice-try": "^1.0.4", + "path-key": "^2.0.1", + "semver": "^5.5.0", + "shebang-command": "^1.2.0", + "which": "^1.2.9" + }, + "engines": { + "node": ">=4.8" + } + }, + "node_modules/execa/node_modules/get-stream": { + "version": "4.1.0", + "resolved": "https://registry.npmmirror.com/get-stream/-/get-stream-4.1.0.tgz", + "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", + "dependencies": { + "pump": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/execa/node_modules/path-key": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/path-key/-/path-key-2.0.1.tgz", + "integrity": "sha512-fEHGKCSmUSDPv4uoj8AlD+joPlq3peND+HRYyxFz4KPw4z926S/b8rIuFs2FYJg3BwsxJf6A9/3eIdLaYC+9Dw==", + "engines": { + "node": ">=4" + } + }, + "node_modules/execa/node_modules/semver": { + "version": "5.7.2", + "resolved": "https://registry.npmmirror.com/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/execa/node_modules/shebang-command": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/shebang-command/-/shebang-command-1.2.0.tgz", + "integrity": "sha512-EV3L1+UQWGor21OmnvojK36mhg+TyIKDh3iFBKBohr5xeXIhNBcx8oWdgkTEEQ+BEFFYdLRuqMfd5L84N1V5Vg==", + "dependencies": { + "shebang-regex": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/execa/node_modules/shebang-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/shebang-regex/-/shebang-regex-1.0.0.tgz", + "integrity": "sha512-wpoSFAxys6b2a2wHZ1XpDSgD7N9iVjg29Ph9uV/uaP9Ex/KXlkTZTeddxDPSYQpgvzKLGJke2UU0AzoGCjNIvQ==", + "engines": { + "node": ">=0.10.0" + } + }, + 
"node_modules/execa/node_modules/which": { + "version": "1.3.1", + "resolved": "https://registry.npmmirror.com/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "which": "bin/which" + } + }, + "node_modules/exit": { + "version": "0.1.2", + "resolved": "https://registry.npmmirror.com/exit/-/exit-0.1.2.tgz", + "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/expand-brackets": { + "version": "2.1.4", + "resolved": "https://registry.npmmirror.com/expand-brackets/-/expand-brackets-2.1.4.tgz", + "integrity": "sha512-w/ozOKR9Obk3qoWeY/WDi6MFta9AoMR+zud60mdnbniMcBxRuFJyDt2LdX/14A1UABeqk+Uk+LDfUpvoGKppZA==", + "dependencies": { + "debug": "^2.3.3", + "define-property": "^0.2.5", + "extend-shallow": "^2.0.1", + "posix-character-classes": "^0.1.0", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/expand-brackets/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmmirror.com/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/expand-brackets/node_modules/define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmmirror.com/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha512-Rr7ADjQZenceVOAKop6ALkkRAmH1A4Gx9hV/7ZujPUN2rkATqFO0JZLZInbAjpZYoJ1gUx8MRMQVkYemcbMSTA==", + "dependencies": { + "is-descriptor": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/expand-brackets/node_modules/is-descriptor": { + "version": "0.1.7", + "resolved": "https://registry.npmmirror.com/is-descriptor/-/is-descriptor-0.1.7.tgz", + "integrity": "sha512-C3grZTvObeN1xud4cRWl366OMXZTj0+HGyk4hvfpx4ZHt1Pb60ANSXqCK7pdOTeUQpRzECBSTphqvD7U+l22Eg==", + "dependencies": { + "is-accessor-descriptor": "^1.0.1", + "is-data-descriptor": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/expand-brackets/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/expect": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/expect/-/expect-27.5.1.tgz", + "integrity": "sha512-E1q5hSUG2AmYQwQJ041nvgpkODHQvB+RKlB4IYdru6uJsyFTRyZAP463M+1lINorwbqAmUggi6+WwkD8lCS/Dw==", + "dev": true, + "dependencies": { + "@jest/types": "^27.5.1", + "jest-get-type": "^27.5.1", + "jest-matcher-utils": "^27.5.1", + "jest-message-util": "^27.5.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/express": { + "version": "4.19.2", + "resolved": "https://registry.npmmirror.com/express/-/express-4.19.2.tgz", + "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", + "dependencies": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "1.20.2", + "content-disposition": "0.5.4", + "content-type": "~1.0.4", + "cookie": "0.6.0", + "cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~1.0.2", + 
"escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.2.0", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "merge-descriptors": "1.0.1", + "methods": "~1.1.2", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "0.1.7", + "proxy-addr": "~2.0.7", + "qs": "6.11.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": "0.18.0", + "serve-static": "1.15.0", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/express-history-api-fallback": { + "version": "2.2.1", + "resolved": "https://registry.npmmirror.com/express-history-api-fallback/-/express-history-api-fallback-2.2.1.tgz", + "integrity": "sha512-swxwm3aP8vrOOvlzOdZvHlSZtJGwHKaY94J6AkrAgCTmcbko3IRwbkhLv2wKV1WeZhjxX58aLMpP3atDBnKuZg==" + }, + "node_modules/express/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmmirror.com/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/express/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/ext": { + "version": "1.7.0", + "resolved": "https://registry.npmmirror.com/ext/-/ext-1.7.0.tgz", + "integrity": "sha512-6hxeJYaL110a9b5TEJSj0gojyHQAmA2ch5Os+ySCiA1QGdS697XWY1pzsrSjqA9LDEEgdB/KypIlR59RcLuHYw==", + "dependencies": { + "type": "^2.7.2" + } + }, + "node_modules/ext-list": { + "version": "2.2.2", + "resolved": "https://registry.npmmirror.com/ext-list/-/ext-list-2.2.2.tgz", + "integrity": "sha512-u+SQgsubraE6zItfVA0tBuCBhfU9ogSRnsvygI7wht9TS510oLkBRXBsqopeUG/GBOIQyKZO9wjTqIu/sf5zFA==", + "dependencies": { + "mime-db": "^1.28.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ext-name": { + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/ext-name/-/ext-name-5.0.0.tgz", + "integrity": "sha512-yblEwXAbGv1VQDmow7s38W77hzAgJAO50ztBLMcUyUBfxv1HC+LGwtiEN+Co6LtlqT/5uwVOxsD4TNIilWhwdQ==", + "dependencies": { + "ext-list": "^2.0.0", + "sort-keys-length": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==", + "dependencies": { + "is-extendable": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/external-editor": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/external-editor/-/external-editor-3.1.0.tgz", + "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==", + "dependencies": { + "chardet": "^0.7.0", + "iconv-lite": "^0.4.24", + "tmp": "^0.0.33" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/extglob": { + "version": "2.0.4", + "resolved": "https://registry.npmmirror.com/extglob/-/extglob-2.0.4.tgz", + "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==", + "dependencies": { + "array-unique": "^0.3.2", + "define-property": "^1.0.0", + "expand-brackets": "^2.1.4", + "extend-shallow": "^2.0.1", + 
"fragment-cache": "^0.2.1", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/extglob/node_modules/define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/define-property/-/define-property-1.0.0.tgz", + "integrity": "sha512-cZTYKFWspt9jZsMscWo8sc/5lbPC9Q0N5nBLgb+Yd915iL3udB1uFgS3B8YCx66UVHq018DAVFoee7x+gxggeA==", + "dependencies": { + "is-descriptor": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmmirror.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + }, + "node_modules/fast-glob": { + "version": "3.3.2", + "resolved": "https://registry.npmmirror.com/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmmirror.com/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmmirror.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true + }, + "node_modules/fastest-levenshtein": { + "version": "1.0.16", + "resolved": "https://registry.npmmirror.com/fastest-levenshtein/-/fastest-levenshtein-1.0.16.tgz", + "integrity": "sha512-eRnCtTTtGZFpQCwhJiUOuxPQWRXVKYDn0b2PeHfXL6/Zi53SLAzAHfVhVWK2AryC/WH05kGfxhFIPvTF0SXQzg==", + "engines": { + "node": ">= 4.9.1" + } + }, + "node_modules/fastq": { + "version": "1.17.1", + "resolved": "https://registry.npmmirror.com/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/faye-websocket": { + "version": "0.11.4", + "resolved": "https://registry.npmmirror.com/faye-websocket/-/faye-websocket-0.11.4.tgz", + "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==", + "dev": true, + "dependencies": { + "websocket-driver": ">=0.5.1" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/fb-watchman": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/fb-watchman/-/fb-watchman-2.0.2.tgz", + "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==", + "dev": true, + "dependencies": { + "bser": "2.1.1" + } + }, + "node_modules/fd-slicer": { + "version": 
"1.1.0", + "resolved": "https://registry.npmmirror.com/fd-slicer/-/fd-slicer-1.1.0.tgz", + "integrity": "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==", + "dependencies": { + "pend": "~1.2.0" + } + }, + "node_modules/figgy-pudding": { + "version": "3.5.2", + "resolved": "https://registry.npmmirror.com/figgy-pudding/-/figgy-pudding-3.5.2.tgz", + "integrity": "sha512-0btnI/H8f2pavGMN8w40mlSKOfTK2SVJmBfBeVIj3kNw0swwgzyRq0d5TJVOwodFmtvpPeWPN/MCcfuWF0Ezbw==", + "deprecated": "This module is no longer supported.", + "peer": true + }, + "node_modules/figures": { + "version": "3.2.0", + "resolved": "https://registry.npmmirror.com/figures/-/figures-3.2.0.tgz", + "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", + "dependencies": { + "escape-string-regexp": "^1.0.5" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "dev": true, + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/file-type": { + "version": "8.1.0", + "resolved": "https://registry.npmmirror.com/file-type/-/file-type-8.1.0.tgz", + "integrity": "sha512-qyQ0pzAy78gVoJsmYeNgl8uH8yKhr1lVhW7JbzJmnlRi0I4R2eEDEJZVKG8agpDnLpacwNbDhLNG/LMdxHD2YQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/file-uri-to-path": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", + "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==", + "optional": true, + "peer": true + }, + "node_modules/filelist": { + "version": "1.0.4", + "resolved": "https://registry.npmmirror.com/filelist/-/filelist-1.0.4.tgz", + "integrity": "sha512-w1cEuf3S+DrLCQL7ET6kz+gmlJdbq9J7yXCSjK/OZCPA+qEN1WyF4ZAf0YYJa4/shHJra2t/d/r8SV4Ji+x+8Q==", + "dependencies": { + "minimatch": "^5.0.1" + } + }, + "node_modules/filelist/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/filelist/node_modules/minimatch": { + "version": "5.1.6", + "resolved": "https://registry.npmmirror.com/minimatch/-/minimatch-5.1.6.tgz", + "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/filename-reserved-regex": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/filename-reserved-regex/-/filename-reserved-regex-2.0.0.tgz", + "integrity": "sha512-lc1bnsSr4L4Bdif8Xb/qrtokGbq5zlsms/CYH8PP+WtCkGNF65DPiQY8vG3SakEdRn8Dlnm+gW/qWKKjS5sZzQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/filenamify": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/filenamify/-/filenamify-2.1.0.tgz", + "integrity": 
"sha512-ICw7NTT6RsDp2rnYKVd8Fu4cr6ITzGy3+u4vUujPkabyaz+03F24NWEX7fs5fp+kBonlaqPH8fAO2NM+SXt/JA==", + "dependencies": { + "filename-reserved-regex": "^2.0.0", + "strip-outer": "^1.0.0", + "trim-repeated": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmmirror.com/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/finalhandler": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/finalhandler/-/finalhandler-1.2.0.tgz", + "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "statuses": "2.0.1", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/finalhandler/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmmirror.com/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/finalhandler/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/find-cache-dir": { + "version": "3.3.2", + "resolved": "https://registry.npmmirror.com/find-cache-dir/-/find-cache-dir-3.3.2.tgz", + "integrity": "sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig==", + "dev": true, + "dependencies": { + "commondir": "^1.0.1", + "make-dir": "^3.0.2", + "pkg-dir": "^4.1.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/avajs/find-cache-dir?sponsor=1" + } + }, + "node_modules/find-cache-dir/node_modules/pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmmirror.com/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmmirror.com/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/fkill": { + "version": "7.2.1", + "resolved": "https://registry.npmmirror.com/fkill/-/fkill-7.2.1.tgz", + "integrity": "sha512-eN9cmsIlRdq06wu3m01OOEgQf5Xh/M7REm0jfZ4eL3V3XisjXzfRq3iyqtKS+FhO6wB36FvWRiRGdeSx5KpLAQ==", + "dependencies": { + "aggregate-error": "^3.1.0", + "arrify": "^2.0.1", + "execa": "^5.0.0", + "pid-port": "^0.1.0", + "process-exists": "^4.0.0", + "ps-list": "^7.2.0", + "taskkill": "^3.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/fkill/node_modules/execa": { + "version": "5.1.1", + "resolved": 
"https://registry.npmmirror.com/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/fkill/node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/fkill/node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/fkill/node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmmirror.com/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://registry.npmmirror.com/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "bin": { + "flat": "cli.js" + } + }, + "node_modules/flat-cache": { + "version": "3.2.0", + "resolved": "https://registry.npmmirror.com/flat-cache/-/flat-cache-3.2.0.tgz", + "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", + "dev": true, + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/flatted": { + "version": "3.3.1", + "resolved": "https://registry.npmmirror.com/flatted/-/flatted-3.3.1.tgz", + "integrity": "sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==", + "dev": true + }, + "node_modules/flow-parser": { + "version": "0.236.0", + "resolved": "https://registry.npmmirror.com/flow-parser/-/flow-parser-0.236.0.tgz", + "integrity": "sha512-0OEk9Gr+Yj7wjDW2KgaNYUypKau71jAfFyeLQF5iVtxqc6uJHag/MT7pmaEApf4qM7u86DkBcd4ualddYMfbLw==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/flush-write-stream": { + "version": "1.1.1", + "resolved": "https://registry.npmmirror.com/flush-write-stream/-/flush-write-stream-1.1.1.tgz", + "integrity": "sha512-3Z4XhFZ3992uIq0XOqb9AreonueSYphE6oYbpt5+3u06JWklbsPkNv3ZKkP9Bz/r+1MWCaMoSQ28P85+1Yc77w==", + "peer": true, + "dependencies": { + "inherits": "^2.0.3", + "readable-stream": "^2.3.6" + } + }, + "node_modules/follow-redirects": { + "version": "1.15.6", + "resolved": "https://registry.npmmirror.com/follow-redirects/-/follow-redirects-1.15.6.tgz", + "integrity": 
"sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/for-each": { + "version": "0.3.3", + "resolved": "https://registry.npmmirror.com/for-each/-/for-each-0.3.3.tgz", + "integrity": "sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==", + "dependencies": { + "is-callable": "^1.1.3" + } + }, + "node_modules/for-in": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/for-in/-/for-in-1.0.2.tgz", + "integrity": "sha512-7EwmXrOjyL+ChxMhmG5lnW9MPt1aIeZEwKhQzoBUdTV0N3zuwWDZYVJatDvZ2OyzPUvdIAZDsCetk3coyMfcnQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/foreground-child": { + "version": "3.1.1", + "resolved": "https://registry.npmmirror.com/foreground-child/-/foreground-child-3.1.1.tgz", + "integrity": "sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.0", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/foreground-child/node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmmirror.com/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/fork-ts-checker-webpack-plugin": { + "version": "6.5.3", + "resolved": "https://registry.npmmirror.com/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.3.tgz", + "integrity": "sha512-SbH/l9ikmMWycd5puHJKTkZJKddF4iRLyW3DeZ08HTI7NGyLS38MXd/KGgeWumQO7YNQbW2u/NtPT2YowbPaGQ==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.8.3", + "@types/json-schema": "^7.0.5", + "chalk": "^4.1.0", + "chokidar": "^3.4.2", + "cosmiconfig": "^6.0.0", + "deepmerge": "^4.2.2", + "fs-extra": "^9.0.0", + "glob": "^7.1.6", + "memfs": "^3.1.2", + "minimatch": "^3.0.4", + "schema-utils": "2.7.0", + "semver": "^7.3.2", + "tapable": "^1.0.0" + }, + "engines": { + "node": ">=10", + "yarn": ">=1.0.0" + }, + "peerDependencies": { + "eslint": ">= 6", + "typescript": ">= 2.7", + "vue-template-compiler": "*", + "webpack": ">= 4" + }, + "peerDependenciesMeta": { + "eslint": { + "optional": true + }, + "vue-template-compiler": { + "optional": true + } + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + 
"ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/schema-utils": { + "version": "2.7.0", + "resolved": "https://registry.npmmirror.com/schema-utils/-/schema-utils-2.7.0.tgz", + "integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.4", + "ajv": "^6.12.2", + "ajv-keywords": "^3.4.1" + }, + "engines": { + "node": ">= 8.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/form-data": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/form-data/-/form-data-3.0.1.tgz", + "integrity": "sha512-RHkBKtLWUVwd7SqRIvCZMEvAMoGUp0XU+seQiZejj0COz3RI3hWP4sCv3gZWWLjJTd7rGwcsF5eKZGii0r/hbg==", + "dev": true, + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmmirror.com/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fraction.js": { + "version": "4.3.7", + "resolved": "https://registry.npmmirror.com/fraction.js/-/fraction.js-4.3.7.tgz", + "integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==", + "dev": true, + 
"engines": { + "node": "*" + }, + "funding": { + "type": "patreon", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/fragment-cache": { + "version": "0.2.1", + "resolved": "https://registry.npmmirror.com/fragment-cache/-/fragment-cache-0.2.1.tgz", + "integrity": "sha512-GMBAbW9antB8iZRHLoGw0b3HANt57diZYFO/HL1JGIC1MjKrdmhxvrJbupnVvpys0zsz7yBApXdQyfepKly2kA==", + "dependencies": { + "map-cache": "^0.2.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmmirror.com/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/from2": { + "version": "2.3.0", + "resolved": "https://registry.npmmirror.com/from2/-/from2-2.3.0.tgz", + "integrity": "sha512-OMcX/4IC/uqEPVgGeyfN22LJk6AZrMkRZHxcHBMBvHScDGgwTm2GT2Wkgtocyd3JfZffjj2kYUDXXII0Fk9W0g==", + "dependencies": { + "inherits": "^2.0.1", + "readable-stream": "^2.0.0" + } + }, + "node_modules/fs-constants": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/fs-constants/-/fs-constants-1.0.0.tgz", + "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==" + }, + "node_modules/fs-extra": { + "version": "9.1.0", + "resolved": "https://registry.npmmirror.com/fs-extra/-/fs-extra-9.1.0.tgz", + "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "dependencies": { + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/fs-minipass": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/fs-minipass/-/fs-minipass-2.1.0.tgz", + "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", + "optional": true, + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/fs-monkey": { + "version": "1.0.6", + "resolved": "https://registry.npmmirror.com/fs-monkey/-/fs-monkey-1.0.6.tgz", + "integrity": "sha512-b1FMfwetIKymC0eioW7mTywihSQE4oLzQn1dB6rZB5fx/3NpNEdAWeCSMB+60/AeT0TCXsxzAlcYVEFCTAksWg==", + "dev": true + }, + "node_modules/fs-write-stream-atomic": { + "version": "1.0.10", + "resolved": "https://registry.npmmirror.com/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz", + "integrity": "sha512-gehEzmPn2nAwr39eay+x3X34Ra+M2QlVUTLhkXPjWdeO8RF9kszk116avgBJM3ZyNHgHXBNx+VmPaFC36k0PzA==", + "peer": true, + "dependencies": { + "graceful-fs": "^4.1.2", + "iferr": "^0.1.5", + "imurmurhash": "^0.1.4", + "readable-stream": "1 || 2" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmmirror.com/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/fswin": { + "version": "2.17.1227", + "resolved": 
"https://registry.npmmirror.com/fswin/-/fswin-2.17.1227.tgz", + "integrity": "sha512-xNDktvwzSsXT8Xqnpz59VbuFwGHhtn1w+dS7QQ+wAu5cbH0p3WMGKU9Duf7cPna+nubhR+5ZG1MTl6/V6xgRgw==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmmirror.com/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/functional-red-black-tree": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz", + "integrity": "sha512-dsKNQNdj6xA3T+QlADDA7mOSlX0qiMINjn0cgr+eGHGsbSHzTabcIogz2+p/iqP1Xs6EP/sS2SbqH+brGTbq0g==", + "dev": true + }, + "node_modules/gauge": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/gauge/-/gauge-3.0.2.tgz", + "integrity": "sha512-+5J6MS/5XksCuXq++uFRsnUd7Ovu1XenbeuIuNRJxYWjgQbPuFhT14lAvsWfqfAmnwluf1OwMjz39HjfLPci0Q==", + "optional": true, + "dependencies": { + "aproba": "^1.0.3 || ^2.0.0", + "color-support": "^1.1.2", + "console-control-strings": "^1.0.0", + "has-unicode": "^2.0.1", + "object-assign": "^4.1.1", + "signal-exit": "^3.0.0", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1", + "wide-align": "^1.1.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmmirror.com/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmmirror.com/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmmirror.com/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-package-type": { + "version": "0.1.0", + "resolved": "https://registry.npmmirror.com/get-package-type/-/get-package-type-0.1.0.tgz", + "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", + "dev": true, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/get-proxy": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/get-proxy/-/get-proxy-2.1.0.tgz", + "integrity": "sha512-zmZIaQTWnNQb4R4fJUEp/FC51eZsc6EkErspy3xtIYStaq8EB/hDIWipxsal+E8rz0qD7f2sL/NA9Xee4RInJw==", + "dependencies": { + "npm-conf": "^1.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/get-stream": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/get-stream/-/get-stream-3.0.0.tgz", + "integrity": "sha512-GlhdIUuVakc8SJ6kK0zAFbiGzRFzNnY4jUuEbV9UROo4Y+0Ny4fjvcZFVTeDA4odpFyOQzaw6hXukJSq/f28sQ==", + "engines": { + "node": ">=4" + } + }, + 
"node_modules/get-value": { + "version": "2.0.6", + "resolved": "https://registry.npmmirror.com/get-value/-/get-value-2.0.6.tgz", + "integrity": "sha512-Ln0UQDlxH1BapMu3GPtf7CuYNwRZf2gwCuPqbyG6pB8WfmFpzqcy4xtAaAMUhnNqjMKTiCPZG2oMT3YSx8U2NA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/git-clone": { + "version": "0.1.0", + "resolved": "https://registry.npmmirror.com/git-clone/-/git-clone-0.1.0.tgz", + "integrity": "sha512-zs9rlfa7HyaJAKG9o+V7C6qfMzyc+tb1IIXdUFcOBcR1U7siKy/uPdauLlrH1mc0vOgUwIv4BF+QxPiiTYz3Rw==" + }, + "node_modules/git-config-path": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/git-config-path/-/git-config-path-2.0.0.tgz", + "integrity": "sha512-qc8h1KIQbJpp+241id3GuAtkdyJ+IK+LIVtkiFTRKRrmddDzs3SI9CvP1QYmWBFvm1I/PWRwj//of8bgAc0ltA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmmirror.com/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmmirror.com/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/glob-to-regexp": { + "version": "0.4.1", + "resolved": "https://registry.npmmirror.com/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", + "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==" + }, + "node_modules/global-dirs": { + "version": "0.1.1", + "resolved": "https://registry.npmmirror.com/global-dirs/-/global-dirs-0.1.1.tgz", + "integrity": "sha512-NknMLn7F2J7aflwFOlGdNIuCDpN3VGoSoB+aap3KABFWbHVn1TCgFC+np23J8W2BiZbjfEw3BFBycSMv1AFblg==", + "dependencies": { + "ini": "^1.3.4" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/global-dirs/node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmmirror.com/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==" + }, + "node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmmirror.com/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmmirror.com/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/gopd/-/gopd-1.0.1.tgz", + "integrity": 
"sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", + "dependencies": { + "get-intrinsic": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/got": { + "version": "8.3.2", + "resolved": "https://registry.npmmirror.com/got/-/got-8.3.2.tgz", + "integrity": "sha512-qjUJ5U/hawxosMryILofZCkm3C84PLJS/0grRIpjAwu+Lkxxj5cxeCU25BG0/3mDSpXKTyZr8oh8wIgLaH0QCw==", + "dependencies": { + "@sindresorhus/is": "^0.7.0", + "cacheable-request": "^2.1.1", + "decompress-response": "^3.3.0", + "duplexer3": "^0.1.4", + "get-stream": "^3.0.0", + "into-stream": "^3.1.0", + "is-retry-allowed": "^1.1.0", + "isurl": "^1.0.0-alpha5", + "lowercase-keys": "^1.0.0", + "mimic-response": "^1.0.0", + "p-cancelable": "^0.4.0", + "p-timeout": "^2.0.1", + "pify": "^3.0.0", + "safe-buffer": "^5.1.1", + "timed-out": "^4.0.1", + "url-parse-lax": "^3.0.0", + "url-to-options": "^1.0.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmmirror.com/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmmirror.com/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true + }, + "node_modules/graphql": { + "version": "15.8.0", + "resolved": "https://registry.npmmirror.com/graphql/-/graphql-15.8.0.tgz", + "integrity": "sha512-5gghUc24tP9HRznNpV2+FIoq3xKkj5dTQqf4v0CpdPbFVwFkWoxOM+o+2OC9ZSvjEMTjfmG9QT+gcvggTwW1zw==", + "engines": { + "node": ">= 10.x" + } + }, + "node_modules/graphql-subscriptions": { + "version": "1.2.1", + "resolved": "https://registry.npmmirror.com/graphql-subscriptions/-/graphql-subscriptions-1.2.1.tgz", + "integrity": "sha512-95yD/tKi24q8xYa7Q9rhQN16AYj5wPbrb8tmHGM3WRc9EBmWrG/0kkMl+tQG8wcEuE9ibR4zyOM31p5Sdr2v4g==", + "dependencies": { + "iterall": "^1.3.0" + }, + "peerDependencies": { + "graphql": "^0.10.5 || ^0.11.3 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0" + } + }, + "node_modules/graphql-tag": { + "version": "2.12.6", + "resolved": "https://registry.npmmirror.com/graphql-tag/-/graphql-tag-2.12.6.tgz", + "integrity": "sha512-FdSNcu2QQcWnM2VNvSCCDCVS5PpPqpzgFT8+GXzqJuoDd0CBncxCY278u4mhRO7tMgo2JjgJA5aZ+nWSQ/Z+xg==", + "dependencies": { + "tslib": "^2.1.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "graphql": "^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0" + } + }, + "node_modules/graphql-type-json": { + "version": "0.3.2", + "resolved": "https://registry.npmmirror.com/graphql-type-json/-/graphql-type-json-0.3.2.tgz", + "integrity": "sha512-J+vjof74oMlCWXSvt0DOf2APEdZOCdubEvGDUAlqH//VBYcOYsGgRW7Xzorr44LvkjiuvecWc8fChxuZZbChtg==", + "peerDependencies": { + "graphql": ">=0.8.0" + } + }, + "node_modules/growly": { + "version": "1.3.0", + "resolved": "https://registry.npmmirror.com/growly/-/growly-1.3.0.tgz", + "integrity": "sha512-+xGQY0YyAWCnqy7Cd++hc2JqMYzlm0dG30Jd0beaA64sROr8C4nt8Yc9V5Ro3avlSUDTN0ulqP/VBKi1/lLygw==" + }, + "node_modules/gzip-size": { + "version": "6.0.0", + "resolved": "https://registry.npmmirror.com/gzip-size/-/gzip-size-6.0.0.tgz", + "integrity": "sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==", + "dev": true, + 
"dependencies": { + "duplexer": "^0.1.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/handle-thing": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/handle-thing/-/handle-thing-2.0.1.tgz", + "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==", + "dev": true + }, + "node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "engines": { + "node": ">=4" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.0.3", + "resolved": "https://registry.npmmirror.com/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbol-support-x": { + "version": "1.4.2", + "resolved": "https://registry.npmmirror.com/has-symbol-support-x/-/has-symbol-support-x-1.4.2.tgz", + "integrity": "sha512-3ToOva++HaW+eCpgqZrCfN51IPB+7bJNVT6CUATzueB5Heb8o6Nam0V3HG5dlDvZU1Gn5QLcbahiKw/XVk5JJw==", + "engines": { + "node": "*" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmmirror.com/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-to-string-tag-x": { + "version": "1.4.1", + "resolved": "https://registry.npmmirror.com/has-to-string-tag-x/-/has-to-string-tag-x-1.4.1.tgz", + "integrity": "sha512-vdbKfmw+3LoOYVr+mtxHaX5a96+0f3DljYd8JOqvOLsf5mw2Otda2qCDT9qRqLAhrjyQ0h7ual5nOiASpsGNFw==", + "dependencies": { + "has-symbol-support-x": "^1.4.1" + }, + "engines": { + "node": "*" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-unicode": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/has-unicode/-/has-unicode-2.0.1.tgz", + "integrity": "sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ==", + "optional": true + }, + "node_modules/has-value": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/has-value/-/has-value-1.0.0.tgz", + "integrity": "sha512-IBXk4GTsLYdQ7Rvt+GRBrFSVEkmuOUy4re0Xjd9kJSUQpnTrWR4/y9RpfexN9vkAPMFuQoeWKwqzPozRTlasGw==", + "dependencies": { + "get-value": "^2.0.6", + "has-values": 
"^1.0.0", + "isobject": "^3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/has-values": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/has-values/-/has-values-1.0.0.tgz", + "integrity": "sha512-ODYZC64uqzmtfGMEAX/FvZiRyWLpAC3vYnNunURUnkGVTS+mI0smVsWaPydRBsE3g+ok7h960jChO8mFcWlHaQ==", + "dependencies": { + "is-number": "^3.0.0", + "kind-of": "^4.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/has-values/node_modules/is-number": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/is-number/-/is-number-3.0.0.tgz", + "integrity": "sha512-4cboCqIpliH+mAvFNegjZQ4kgKc3ZUhQVr3HvWbSh5q3WH2v82ct+T2Y1hdU5Gdtorx/cLifQjqCbL7bpznLTg==", + "dependencies": { + "kind-of": "^3.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/has-values/node_modules/is-number/node_modules/kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmmirror.com/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "dependencies": { + "is-buffer": "^1.1.5" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/has-values/node_modules/kind-of": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/kind-of/-/kind-of-4.0.0.tgz", + "integrity": "sha512-24XsCxmEbRwEDbz/qz3stgin8TTzZ1ESR56OMCN0ujYg+vRutNSiOj9bHH9u85DKgXguraugV5sFuvbD4FW/hw==", + "dependencies": { + "is-buffer": "^1.1.5" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/hash-base": { + "version": "3.0.4", + "resolved": "https://registry.npmmirror.com/hash-base/-/hash-base-3.0.4.tgz", + "integrity": "sha512-EeeoJKjTyt868liAlVmcv2ZsUfGHlE3Q+BICOXcZiwN3osr5Q/zFGYmTJpoIzuaSTAwndFy+GqhEwlU4L3j4Ow==", + "peer": true, + "dependencies": { + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/hash-sum": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/hash-sum/-/hash-sum-2.0.0.tgz", + "integrity": "sha512-WdZTbAByD+pHfl/g9QSsBIIwy8IT+EsPiKDs0KNX+zSHhdDLFKdZu0BQHljvO+0QI/BasbMSUa8wYNCZTvhslg==", + "dev": true + }, + "node_modules/hash.js": { + "version": "1.1.7", + "resolved": "https://registry.npmmirror.com/hash.js/-/hash.js-1.1.7.tgz", + "integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==", + "peer": true, + "dependencies": { + "inherits": "^2.0.3", + "minimalistic-assert": "^1.0.1" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "dev": true, + "bin": { + "he": "bin/he" + } + }, + "node_modules/hmac-drbg": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/hmac-drbg/-/hmac-drbg-1.0.1.tgz", + "integrity": "sha512-Tti3gMqLdZfhOQY1Mzf/AanLiqh1WTiJgEj26ZuYQ9fbkLomzGchCws4FyrSd4VkpBfiNhaE1On+lOz894jvXg==", + "peer": true, + "dependencies": { + "hash.js": "^1.0.3", + "minimalistic-assert": "^1.0.0", + "minimalistic-crypto-utils": "^1.0.1" + } + }, + 
"node_modules/hosted-git-info": { + "version": "2.8.9", + "resolved": "https://registry.npmmirror.com/hosted-git-info/-/hosted-git-info-2.8.9.tgz", + "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==" + }, + "node_modules/hpack.js": { + "version": "2.1.6", + "resolved": "https://registry.npmmirror.com/hpack.js/-/hpack.js-2.1.6.tgz", + "integrity": "sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==", + "dev": true, + "dependencies": { + "inherits": "^2.0.1", + "obuf": "^1.0.0", + "readable-stream": "^2.0.1", + "wbuf": "^1.1.0" + } + }, + "node_modules/html-encoding-sniffer": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/html-encoding-sniffer/-/html-encoding-sniffer-2.0.1.tgz", + "integrity": "sha512-D5JbOMBIR/TVZkubHT+OyT2705QvogUW4IBn6nHd756OwieSF9aDYFj4dv6HHEVGYbHaLETa3WggZYWWMyy3ZQ==", + "dev": true, + "dependencies": { + "whatwg-encoding": "^1.0.5" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/html-entities": { + "version": "2.5.2", + "resolved": "https://registry.npmmirror.com/html-entities/-/html-entities-2.5.2.tgz", + "integrity": "sha512-K//PSRMQk4FZ78Kyau+mZurHn3FH0Vwr+H36eE0rPbeYkRRi9YxceYPhuN60UwWorxyKHhqoAJl2OFKa4BVtaA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/mdevils" + }, + { + "type": "patreon", + "url": "https://patreon.com/mdevils" + } + ] + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true + }, + "node_modules/html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmmirror.com/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw==", + "dev": true, + "dependencies": { + "camel-case": "^4.1.2", + "clean-css": "^5.2.2", + "commander": "^8.3.0", + "he": "^1.2.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.10.0" + }, + "bin": { + "html-minifier-terser": "cli.js" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/html-minifier-terser/node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmmirror.com/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "dev": true, + "engines": { + "node": ">= 12" + } + }, + "node_modules/html-webpack-plugin": { + "version": "5.6.0", + "resolved": "https://registry.npmmirror.com/html-webpack-plugin/-/html-webpack-plugin-5.6.0.tgz", + "integrity": "sha512-iwaY4wzbe48AfKLZ/Cc8k0L+FKG6oSNRaZ8x5A/T/IVDGyXcbHncM9TdDa93wn0FsSm82FhTKW7f3vS61thXAw==", + "dev": true, + "dependencies": { + "@types/html-minifier-terser": "^6.0.0", + "html-minifier-terser": "^6.0.2", + "lodash": "^4.17.21", + "pretty-error": "^4.0.0", + "tapable": "^2.0.0" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/html-webpack-plugin" + }, + "peerDependencies": { + "@rspack/core": "0.x || 1.x", + "webpack": "^5.20.0" + }, + "peerDependenciesMeta": { + "@rspack/core": { + "optional": true + }, + "webpack": { + "optional": true + } + } + }, + "node_modules/html-webpack-plugin/node_modules/tapable": { + 
"version": "2.2.1", + "resolved": "https://registry.npmmirror.com/tapable/-/tapable-2.2.1.tgz", + "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/htmlparser2": { + "version": "6.1.0", + "resolved": "https://registry.npmmirror.com/htmlparser2/-/htmlparser2-6.1.0.tgz", + "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==", + "dev": true, + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.0.0", + "domutils": "^2.5.2", + "entities": "^2.0.0" + } + }, + "node_modules/http-cache-semantics": { + "version": "3.8.1", + "resolved": "https://registry.npmmirror.com/http-cache-semantics/-/http-cache-semantics-3.8.1.tgz", + "integrity": "sha512-5ai2iksyV8ZXmnZhHH4rWPoxxistEexSi5936zIQ1bnNTW5VnA85B6P/VpXiRM017IgRvb2kKo1a//y+0wSp3w==" + }, + "node_modules/http-deceiver": { + "version": "1.2.7", + "resolved": "https://registry.npmmirror.com/http-deceiver/-/http-deceiver-1.2.7.tgz", + "integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==", + "dev": true + }, + "node_modules/http-errors": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/http-errors/-/http-errors-2.0.0.tgz", + "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "dependencies": { + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/http-parser-js": { + "version": "0.5.8", + "resolved": "https://registry.npmmirror.com/http-parser-js/-/http-parser-js-0.5.8.tgz", + "integrity": "sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q==", + "dev": true + }, + "node_modules/http-proxy": { + "version": "1.18.1", + "resolved": "https://registry.npmmirror.com/http-proxy/-/http-proxy-1.18.1.tgz", + "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", + "dev": true, + "dependencies": { + "eventemitter3": "^4.0.0", + "follow-redirects": "^1.0.0", + "requires-port": "^1.0.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/http-proxy-agent": { + "version": "4.0.1", + "resolved": "https://registry.npmmirror.com/http-proxy-agent/-/http-proxy-agent-4.0.1.tgz", + "integrity": "sha512-k0zdNgqWTGA6aeIRVpvfVob4fL52dTfaehylg0Y4UvSySvOq/Y+BOyPrgpUrA7HylqvU8vIZGsRuXmspskV0Tg==", + "dev": true, + "dependencies": { + "@tootallnate/once": "1", + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/http-proxy-middleware": { + "version": "2.0.6", + "resolved": "https://registry.npmmirror.com/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz", + "integrity": "sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==", + "dev": true, + "dependencies": { + "@types/http-proxy": "^1.17.8", + "http-proxy": "^1.18.1", + "is-glob": "^4.0.1", + "is-plain-obj": "^3.0.0", + "micromatch": "^4.0.2" + }, + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "@types/express": "^4.17.13" + }, + "peerDependenciesMeta": { + "@types/express": { + "optional": true + 
} + } + }, + "node_modules/http-proxy-middleware/node_modules/is-plain-obj": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/is-plain-obj/-/is-plain-obj-3.0.0.tgz", + "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/http-proxy/node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmmirror.com/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", + "dev": true + }, + "node_modules/https-browserify": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/https-browserify/-/https-browserify-1.0.0.tgz", + "integrity": "sha512-J+FkSdyD+0mA0N+81tMotaRMfSL9SGi+xpD3T6YApKsc3bGSXJlfXri3VyFOeYkfLRQisDk1W+jIFFKBeUBbBg==", + "peer": true + }, + "node_modules/https-proxy-agent": { + "version": "5.0.1", + "resolved": "https://registry.npmmirror.com/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", + "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", + "devOptional": true, + "dependencies": { + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmmirror.com/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/icss-utils": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/icss-utils/-/icss-utils-5.1.0.tgz", + "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==", + "dev": true, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmmirror.com/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/iferr": { + "version": "0.1.5", + "resolved": "https://registry.npmmirror.com/iferr/-/iferr-0.1.5.tgz", + "integrity": "sha512-DUNFN5j7Tln0D+TxzloUjKB+CtVu6myn0JEFak6dG18mNt9YkQ6lzGCdafwofISZ1lLF3xRHJ98VKy9ynkcFaA==", + "peer": true + }, + "node_modules/ignore": { + "version": "5.3.1", + "resolved": "https://registry.npmmirror.com/ignore/-/ignore-5.3.1.tgz", + "integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "resolved": 
"https://registry.npmmirror.com/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "dev": true, + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-fresh/node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/import-global": { + "version": "0.1.0", + "resolved": "https://registry.npmmirror.com/import-global/-/import-global-0.1.0.tgz", + "integrity": "sha512-8+hPJLML+m1ym9NSeZXTXFkY5+ml0fYFAzO5yhZiaFQvk9kO0NkE7vd7e7kCVjkTmAxsDPbrWwLQACMwGTDgIg==", + "dependencies": { + "global-dirs": "^0.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/import-local": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/import-local/-/import-local-3.1.0.tgz", + "integrity": "sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg==", + "dependencies": { + "pkg-dir": "^4.2.0", + "resolve-cwd": "^3.0.0" + }, + "bin": { + "import-local-fixture": "fixtures/cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-local/node_modules/pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmmirror.com/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmmirror.com/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/infer-owner": { + "version": "1.0.4", + "resolved": "https://registry.npmmirror.com/infer-owner/-/infer-owner-1.0.4.tgz", + "integrity": "sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A==", + "peer": true + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmmirror.com/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmmirror.com/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/ini": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/ini/-/ini-2.0.0.tgz", + "integrity": 
"sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==", + "engines": { + "node": ">=10" + } + }, + "node_modules/inquirer": { + "version": "8.2.6", + "resolved": "https://registry.npmmirror.com/inquirer/-/inquirer-8.2.6.tgz", + "integrity": "sha512-M1WuAmb7pn9zdFRtQYk26ZBoY043Sse0wVDdk4Bppr+JOXyQYybdtvK+l9wUibhtjdjvtoiNy8tk+EgsYIUqKg==", + "dependencies": { + "ansi-escapes": "^4.2.1", + "chalk": "^4.1.1", + "cli-cursor": "^3.1.0", + "cli-width": "^3.0.0", + "external-editor": "^3.0.3", + "figures": "^3.0.0", + "lodash": "^4.17.21", + "mute-stream": "0.0.8", + "ora": "^5.4.1", + "run-async": "^2.4.0", + "rxjs": "^7.5.5", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0", + "through": "^2.3.6", + "wrap-ansi": "^6.0.1" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/inquirer/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/inquirer/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/inquirer/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/inquirer/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/inquirer/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/inquirer/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/inquirer/node_modules/wrap-ansi": { + "version": "6.2.0", + "resolved": "https://registry.npmmirror.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/interpret": { + "version": "3.1.1", + "resolved": 
"https://registry.npmmirror.com/interpret/-/interpret-3.1.1.tgz", + "integrity": "sha512-6xwYfHbajpoF0xLW+iwLkhwgvLoZDfjYfoFNu8ftMoXINzwuymNLd9u/KmwtdT2GbR+/Cz66otEGEVVUHX9QLQ==", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/into-stream": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/into-stream/-/into-stream-3.1.0.tgz", + "integrity": "sha512-TcdjPibTksa1NQximqep2r17ISRiNE9fwlfbg3F8ANdvP5/yrFTew86VcO//jk4QTaMlbjypPBq76HN2zaKfZQ==", + "dependencies": { + "from2": "^2.1.1", + "p-is-promise": "^1.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmmirror.com/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/is-accessor-descriptor": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/is-accessor-descriptor/-/is-accessor-descriptor-1.0.1.tgz", + "integrity": "sha512-YBUanLI8Yoihw923YeFUS5fs0fF2f5TSFTNiYAAzhhDscDa3lEqYuz1pDOEP5KvX94I9ey3vsqjJcLVFVU+3QA==", + "dependencies": { + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/is-arguments": { + "version": "1.1.1", + "resolved": "https://registry.npmmirror.com/is-arguments/-/is-arguments-1.1.1.tgz", + "integrity": "sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==", + "dependencies": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmmirror.com/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==" + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "devOptional": true, + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-buffer": { + "version": "1.1.6", + "resolved": "https://registry.npmmirror.com/is-buffer/-/is-buffer-1.1.6.tgz", + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" + }, + "node_modules/is-callable": { + "version": "1.2.7", + "resolved": "https://registry.npmmirror.com/is-callable/-/is-callable-1.2.7.tgz", + "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-ci": { + "version": "1.2.1", + "resolved": "https://registry.npmmirror.com/is-ci/-/is-ci-1.2.1.tgz", + "integrity": "sha512-s6tfsaQaQi3JNciBH6shVqEDvhGut0SUXr31ag8Pd8BBbVVlcGfWhpPmEOoM6RJ5TFhbypvf5yyRw/VXW1IiWg==", + "dev": true, + "dependencies": { + "ci-info": "^1.5.0" + }, + "bin": { + "is-ci": "bin.js" + } + }, + "node_modules/is-ci/node_modules/ci-info": { + "version": "1.6.0", + "resolved": "https://registry.npmmirror.com/ci-info/-/ci-info-1.6.0.tgz", + "integrity": "sha512-vsGdkwSCDpWmP80ncATX7iea5DWQemg1UgCW5J8tqjU3lYw4FBYuj89J0CTVomA7BEfvSZd84GmHko+MxFQU2A==", + "dev": 
true + }, + "node_modules/is-core-module": { + "version": "2.13.1", + "resolved": "https://registry.npmmirror.com/is-core-module/-/is-core-module-2.13.1.tgz", + "integrity": "sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==", + "dependencies": { + "hasown": "^2.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-data-descriptor": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/is-data-descriptor/-/is-data-descriptor-1.0.1.tgz", + "integrity": "sha512-bc4NlCDiCr28U4aEsQ3Qs2491gVq4V8G7MQyws968ImqjKuYtTJXrl7Vq7jsN7Ly/C3xj5KWFrY7sHNeDkAzXw==", + "dependencies": { + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/is-descriptor": { + "version": "1.0.3", + "resolved": "https://registry.npmmirror.com/is-descriptor/-/is-descriptor-1.0.3.tgz", + "integrity": "sha512-JCNNGbwWZEVaSPtS45mdtrneRWJFp07LLmykxeFV5F6oBvNF8vHSfJuJgoT472pSfk+Mf8VnlrspaFBHWM8JAw==", + "dependencies": { + "is-accessor-descriptor": "^1.0.1", + "is-data-descriptor": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/is-docker": { + "version": "2.2.1", + "resolved": "https://registry.npmmirror.com/is-docker/-/is-docker-2.2.1.tgz", + "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-extendable": { + "version": "0.1.1", + "resolved": "https://registry.npmmirror.com/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmmirror.com/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-file-esm": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/is-file-esm/-/is-file-esm-1.0.0.tgz", + "integrity": "sha512-rZlaNKb4Mr8WlRu2A9XdeoKgnO5aA53XdPHgCKVyCrQ/rWi89RET1+bq37Ru46obaQXeiX4vmFIm1vks41hoSA==", + "dev": true, + "dependencies": { + "read-pkg-up": "^7.0.1" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/is-generator-fn/-/is-generator-fn-2.1.0.tgz", + "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/is-generator-function": { + "version": "1.0.10", + "resolved": "https://registry.npmmirror.com/is-generator-function/-/is-generator-function-1.0.10.tgz", + "integrity": "sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } 
+ }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmmirror.com/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-interactive": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/is-interactive/-/is-interactive-1.0.0.tgz", + "integrity": "sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-natural-number": { + "version": "4.0.1", + "resolved": "https://registry.npmmirror.com/is-natural-number/-/is-natural-number-4.0.1.tgz", + "integrity": "sha512-Y4LTamMe0DDQIIAlaer9eKebAlDSV6huy+TWhJVPlzZh2o4tRP5SQWFlLn5N0To4mDD22/qdOq+veo1cSISLgQ==" + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmmirror.com/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-object": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/is-object/-/is-object-1.0.2.tgz", + "integrity": "sha512-2rRIahhZr2UWb45fIOuvZGpFtz0TyOZLf32KxBbSoUCeZR495zCKlWUKKUByk3geS2eAs7ZAABt0Y/Rx0GiQGA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-plain-obj": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/is-plain-obj/-/is-plain-obj-1.1.0.tgz", + "integrity": "sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmmirror.com/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dependencies": { + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-potential-custom-element-name": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz", + "integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==", + "dev": true + }, + "node_modules/is-promise": { + "version": "2.2.2", + "resolved": "https://registry.npmmirror.com/is-promise/-/is-promise-2.2.2.tgz", + "integrity": "sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ==" + }, + "node_modules/is-retry-allowed": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/is-retry-allowed/-/is-retry-allowed-1.2.0.tgz", + "integrity": "sha512-RUbUeKwvm3XG2VYamhJL1xFktgjvPzL0Hq8C+6yrWIswDy3BIXGqCxhxkc30N9jqK311gVU137K8Ei55/zVJRg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-stream": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/is-stream/-/is-stream-1.1.0.tgz", + "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-typed-array": { + "version": "1.1.13", + "resolved": "https://registry.npmmirror.com/is-typed-array/-/is-typed-array-1.1.13.tgz", + "integrity": 
"sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw==", + "dependencies": { + "which-typed-array": "^1.1.14" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==" + }, + "node_modules/is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmmirror.com/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-whitespace": { + "version": "0.3.0", + "resolved": "https://registry.npmmirror.com/is-whitespace/-/is-whitespace-0.3.0.tgz", + "integrity": "sha512-RydPhl4S6JwAyj0JJjshWJEFG6hNye3pZFBRZaTUfZFwGHxzppNaNOVgQuS/E/SlhrApuMXrpnK1EEIXfdo3Dg==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-windows": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/is-windows/-/is-windows-1.0.2.tgz", + "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-wsl": { + "version": "2.2.0", + "resolved": "https://registry.npmmirror.com/is-wsl/-/is-wsl-2.2.0.tgz", + "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", + "dependencies": { + "is-docker": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + }, + "node_modules/isbinaryfile": { + "version": "4.0.10", + "resolved": "https://registry.npmmirror.com/isbinaryfile/-/isbinaryfile-4.0.10.tgz", + "integrity": "sha512-iHrqe5shvBUcFbmZq9zOQHBoeOhZJu6RQGrDpBgenUm/Am+F3JM2MgQj+rK3Z601fzrL5gLZWtAPH2OBaSVcyw==", + "engines": { + "node": ">= 8.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/gjtorikian/" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + }, + "node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.2", + "resolved": "https://registry.npmmirror.com/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-instrument": { + "version": "5.2.1", + "resolved": "https://registry.npmmirror.com/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz", + "integrity": 
"sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==", + "dev": true, + "dependencies": { + "@babel/core": "^7.12.3", + "@babel/parser": "^7.14.7", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", + "dev": true, + "dependencies": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-report/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-report/node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/istanbul-lib-report/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-report/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-source-maps": { + "version": "4.0.1", + "resolved": "https://registry.npmmirror.com/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", + "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", + "dev": true, + "dependencies": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-reports": { + "version": "3.1.7", + "resolved": "https://registry.npmmirror.com/istanbul-reports/-/istanbul-reports-3.1.7.tgz", + "integrity": "sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g==", + "dev": true, + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/isurl": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/isurl/-/isurl-1.0.0.tgz", + "integrity": "sha512-1P/yWsxPlDtn7QeRD+ULKQPaIaN6yF368GZ2vDfv0AL0NwpStafjWCDDdn0k8wgFMWpVAqG7oJhxHnlud42i9w==", + "dependencies": { + "has-to-string-tag-x": "^1.2.0", + "is-object": "^1.0.1" + }, + "engines": { + "node": ">= 4" + } + }, + 
"node_modules/iterall": { + "version": "1.3.0", + "resolved": "https://registry.npmmirror.com/iterall/-/iterall-1.3.0.tgz", + "integrity": "sha512-QZ9qOMdF+QLHxy1QIpUHUU1D5pS2CG2P69LF6L6CPjPYA/XMOmKV3PZpawHoAjHNyB0swdVTRxdYT4tbBbxqwg==" + }, + "node_modules/jackspeak": { + "version": "2.3.6", + "resolved": "https://registry.npmmirror.com/jackspeak/-/jackspeak-2.3.6.tgz", + "integrity": "sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==", + "dev": true, + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, + "node_modules/jake": { + "version": "10.9.1", + "resolved": "https://registry.npmmirror.com/jake/-/jake-10.9.1.tgz", + "integrity": "sha512-61btcOHNnLnsOdtLgA5efqQWjnSi/vow5HbI7HMdKKWqvrKR1bLK3BPlJn9gcSaP2ewuamUSMB5XEy76KUIS2w==", + "dependencies": { + "async": "^3.2.3", + "chalk": "^4.0.2", + "filelist": "^1.0.4", + "minimatch": "^3.1.2" + }, + "bin": { + "jake": "bin/cli.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/jake/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jake/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/jake/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/jake/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/jake/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/jake/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/javascript-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/javascript-stringify/-/javascript-stringify-2.1.0.tgz", + "integrity": 
"sha512-JVAfqNPTvNq3sB/VHQJAFxN/sPgKnsKrCwyRt15zwNCdrMMJDdcEOdubuy+DuJYYdm0ox1J4uzEuYKkN+9yhVg==" + }, + "node_modules/jest": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest/-/jest-27.5.1.tgz", + "integrity": "sha512-Yn0mADZB89zTtjkPJEXwrac3LHudkQMR+Paqa8uxJHCBr9agxztUifWCyiYrjhMPBoUVBjyny0I7XH6ozDr7QQ==", + "dev": true, + "dependencies": { + "@jest/core": "^27.5.1", + "import-local": "^3.0.2", + "jest-cli": "^27.5.1" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-changed-files": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-changed-files/-/jest-changed-files-27.5.1.tgz", + "integrity": "sha512-buBLMiByfWGCoMsLLzGUUSpAmIAGnbR2KJoMN10ziLhOLvP4e0SlypHnAel8iqQXTrcbmfEY9sSqae5sgUsTvw==", + "dev": true, + "dependencies": { + "@jest/types": "^27.5.1", + "execa": "^5.0.0", + "throat": "^6.0.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-changed-files/node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmmirror.com/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/jest-changed-files/node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/jest-changed-files/node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/jest-changed-files/node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmmirror.com/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-circus": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-circus/-/jest-circus-27.5.1.tgz", + "integrity": "sha512-D95R7x5UtlMA5iBYsOHFFbMD/GVA4R/Kdq15f7xYWUfWHBto9NYRsOvnSauTgdF+ogCpJ4tyKOXhUifxS65gdw==", + "dev": true, + "dependencies": { + "@jest/environment": "^27.5.1", + "@jest/test-result": "^27.5.1", + "@jest/types": "^27.5.1", + "@types/node": "*", + "chalk": "^4.0.0", + "co": "^4.6.0", + "dedent": "^0.7.0", + "expect": 
"^27.5.1", + "is-generator-fn": "^2.0.0", + "jest-each": "^27.5.1", + "jest-matcher-utils": "^27.5.1", + "jest-message-util": "^27.5.1", + "jest-runtime": "^27.5.1", + "jest-snapshot": "^27.5.1", + "jest-util": "^27.5.1", + "pretty-format": "^27.5.1", + "slash": "^3.0.0", + "stack-utils": "^2.0.3", + "throat": "^6.0.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-circus/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-circus/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/jest-circus/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/jest-circus/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/jest-circus/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-circus/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-cli": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-cli/-/jest-cli-27.5.1.tgz", + "integrity": "sha512-Hc6HOOwYq4/74/c62dEE3r5elx8wjYqxY0r0G/nFrLDPMFRu6RA/u8qINOIkvhxG7mMQ5EJsOGfRpI8L6eFUVw==", + "dev": true, + "dependencies": { + "@jest/core": "^27.5.1", + "@jest/test-result": "^27.5.1", + "@jest/types": "^27.5.1", + "chalk": "^4.0.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "import-local": "^3.0.2", + "jest-config": "^27.5.1", + "jest-util": "^27.5.1", + "jest-validate": "^27.5.1", + "prompts": "^2.0.1", + "yargs": "^16.2.0" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": 
{ + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-cli/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-cli/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/jest-cli/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/jest-cli/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/jest-cli/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-cli/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-config": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-config/-/jest-config-27.5.1.tgz", + "integrity": "sha512-5sAsjm6tGdsVbW9ahcChPAFCk4IlkQUknH5AvKjuLTSlcO/wCZKyFdn7Rg0EkC+OGgWODEy2hDpWB1PgzH0JNA==", + "dev": true, + "dependencies": { + "@babel/core": "^7.8.0", + "@jest/test-sequencer": "^27.5.1", + "@jest/types": "^27.5.1", + "babel-jest": "^27.5.1", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "deepmerge": "^4.2.2", + "glob": "^7.1.1", + "graceful-fs": "^4.2.9", + "jest-circus": "^27.5.1", + "jest-environment-jsdom": "^27.5.1", + "jest-environment-node": "^27.5.1", + "jest-get-type": "^27.5.1", + "jest-jasmine2": "^27.5.1", + "jest-regex-util": "^27.5.1", + "jest-resolve": "^27.5.1", + "jest-runner": "^27.5.1", + "jest-util": "^27.5.1", + "jest-validate": "^27.5.1", + "micromatch": "^4.0.4", + "parse-json": "^5.2.0", + "pretty-format": "^27.5.1", + "slash": "^3.0.0", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + }, + "peerDependencies": { + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "ts-node": { + "optional": true + } + } + }, + 
"node_modules/jest-config/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-config/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/jest-config/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/jest-config/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/jest-config/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-config/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-diff": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-diff/-/jest-diff-27.5.1.tgz", + "integrity": "sha512-m0NvkX55LDt9T4mctTEgnZk3fmEg3NRYutvMPWM/0iPnkFj2wIeF45O1718cMSOFO1vINkqmxqD8vE37uTEbqw==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0", + "diff-sequences": "^27.5.1", + "jest-get-type": "^27.5.1", + "pretty-format": "^27.5.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-diff/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-diff/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + 
"dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/jest-diff/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/jest-diff/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/jest-diff/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-diff/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-docblock": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-docblock/-/jest-docblock-27.5.1.tgz", + "integrity": "sha512-rl7hlABeTsRYxKiUfpHrQrG4e2obOiTQWfMEH3PxPjOtdsfLQO4ReWSZaQ7DETm4xu07rl4q/h4zcKXyU0/OzQ==", + "dev": true, + "dependencies": { + "detect-newline": "^3.0.0" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-each": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-each/-/jest-each-27.5.1.tgz", + "integrity": "sha512-1Ff6p+FbhT/bXQnEouYy00bkNSY7OUpfIcmdl8vZ31A1UUaurOLPA8a8BbJOF2RDUElwJhmeaV7LnagI+5UwNQ==", + "dev": true, + "dependencies": { + "@jest/types": "^27.5.1", + "chalk": "^4.0.0", + "jest-get-type": "^27.5.1", + "jest-util": "^27.5.1", + "pretty-format": "^27.5.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-each/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-each/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/jest-each/node_modules/color-convert": { + "version": "2.0.1", + "resolved": 
"https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/jest-each/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/jest-each/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-each/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-environment-jsdom": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-environment-jsdom/-/jest-environment-jsdom-27.5.1.tgz", + "integrity": "sha512-TFBvkTC1Hnnnrka/fUb56atfDtJ9VMZ94JkjTbggl1PEpwrYtUBKMezB3inLmWqQsXYLcMwNoDQwoBTAvFfsfw==", + "dev": true, + "dependencies": { + "@jest/environment": "^27.5.1", + "@jest/fake-timers": "^27.5.1", + "@jest/types": "^27.5.1", + "@types/node": "*", + "jest-mock": "^27.5.1", + "jest-util": "^27.5.1", + "jsdom": "^16.6.0" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-environment-node": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-environment-node/-/jest-environment-node-27.5.1.tgz", + "integrity": "sha512-Jt4ZUnxdOsTGwSRAfKEnE6BcwsSPNOijjwifq5sDFSA2kesnXTvNqKHYgM0hDq3549Uf/KzdXNYn4wMZJPlFLw==", + "dev": true, + "dependencies": { + "@jest/environment": "^27.5.1", + "@jest/fake-timers": "^27.5.1", + "@jest/types": "^27.5.1", + "@types/node": "*", + "jest-mock": "^27.5.1", + "jest-util": "^27.5.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-get-type": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-get-type/-/jest-get-type-27.5.1.tgz", + "integrity": "sha512-2KY95ksYSaK7DMBWQn6dQz3kqAf3BB64y2udeG+hv4KfSOb9qwcYQstTJc1KCbsix+wLZWZYN8t7nwX3GOBLRw==", + "dev": true, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-haste-map": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-haste-map/-/jest-haste-map-27.5.1.tgz", + "integrity": "sha512-7GgkZ4Fw4NFbMSDSpZwXeBiIbx+t/46nJ2QitkOjvwPYyZmqttu2TDSimMHP1EkPOi4xUZAN1doE5Vd25H4Jng==", + "dev": true, + "dependencies": { + "@jest/types": "^27.5.1", + "@types/graceful-fs": "^4.1.2", + "@types/node": "*", + "anymatch": "^3.0.3", + "fb-watchman": "^2.0.0", + "graceful-fs": "^4.2.9", + "jest-regex-util": "^27.5.1", + "jest-serializer": "^27.5.1", + "jest-util": "^27.5.1", + "jest-worker": "^27.5.1", + "micromatch": "^4.0.4", + "walker": "^1.0.7" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + }, + 
"optionalDependencies": { + "fsevents": "^2.3.2" + } + }, + "node_modules/jest-jasmine2": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-jasmine2/-/jest-jasmine2-27.5.1.tgz", + "integrity": "sha512-jtq7VVyG8SqAorDpApwiJJImd0V2wv1xzdheGHRGyuT7gZm6gG47QEskOlzsN1PG/6WNaCo5pmwMHDf3AkG2pQ==", + "dev": true, + "dependencies": { + "@jest/environment": "^27.5.1", + "@jest/source-map": "^27.5.1", + "@jest/test-result": "^27.5.1", + "@jest/types": "^27.5.1", + "@types/node": "*", + "chalk": "^4.0.0", + "co": "^4.6.0", + "expect": "^27.5.1", + "is-generator-fn": "^2.0.0", + "jest-each": "^27.5.1", + "jest-matcher-utils": "^27.5.1", + "jest-message-util": "^27.5.1", + "jest-runtime": "^27.5.1", + "jest-snapshot": "^27.5.1", + "jest-util": "^27.5.1", + "pretty-format": "^27.5.1", + "throat": "^6.0.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-jasmine2/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-jasmine2/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/jest-jasmine2/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/jest-jasmine2/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/jest-jasmine2/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-jasmine2/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-leak-detector": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-leak-detector/-/jest-leak-detector-27.5.1.tgz", + "integrity": "sha512-POXfWAMvfU6WMUXftV4HolnJfnPOGEu10fscNCA76KBpRRhcMN2c8d3iT2pxQS3HLbA+5X4sOUPzYO2NUyIlHQ==", + "dev": true, + 
"dependencies": { + "jest-get-type": "^27.5.1", + "pretty-format": "^27.5.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-matcher-utils": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-matcher-utils/-/jest-matcher-utils-27.5.1.tgz", + "integrity": "sha512-z2uTx/T6LBaCoNWNFWwChLBKYxTMcGBRjAt+2SbP929/Fflb9aa5LGma654Rz8z9HLxsrUaYzxE9T/EFIL/PAw==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0", + "jest-diff": "^27.5.1", + "jest-get-type": "^27.5.1", + "pretty-format": "^27.5.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-matcher-utils/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-matcher-utils/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/jest-matcher-utils/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/jest-matcher-utils/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/jest-matcher-utils/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-matcher-utils/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-message-util": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-message-util/-/jest-message-util-27.5.1.tgz", + "integrity": "sha512-rMyFe1+jnyAAf+NHwTclDz0eAaLkVDdKVHHBFWsBWHnnh5YeJMNWWsv7AbFYXfK3oTqvL7VTWkhNLu1jX24D+g==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@jest/types": "^27.5.1", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^27.5.1", + "slash": 
"^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-message-util/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-message-util/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/jest-message-util/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/jest-message-util/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/jest-message-util/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-message-util/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-mock": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-mock/-/jest-mock-27.5.1.tgz", + "integrity": "sha512-K4jKbY1d4ENhbrG2zuPWaQBvDly+iZ2yAW+T1fATN78hc0sInwn7wZB8XtlNnvHug5RMwV897Xm4LqmPM4e2Og==", + "dev": true, + "dependencies": { + "@jest/types": "^27.5.1", + "@types/node": "*" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-pnp-resolver": { + "version": "1.2.3", + "resolved": "https://registry.npmmirror.com/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz", + "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==", + "dev": true, + "engines": { + "node": ">=6" + }, + "peerDependencies": { + "jest-resolve": "*" + }, + "peerDependenciesMeta": { + "jest-resolve": { + "optional": true + } + } + }, + "node_modules/jest-regex-util": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-regex-util/-/jest-regex-util-27.5.1.tgz", + "integrity": 
"sha512-4bfKq2zie+x16okqDXjXn9ql2B0dScQu+vcwe4TvFVhkVyuWLqpZrZtXxLLWoXYgn0E87I6r6GRYHF7wFZBUvg==", + "dev": true, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-resolve": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-resolve/-/jest-resolve-27.5.1.tgz", + "integrity": "sha512-FFDy8/9E6CV83IMbDpcjOhumAQPDyETnU2KZ1O98DwTnz8AOBsW/Xv3GySr1mOZdItLR+zDZ7I/UdTFbgSOVCw==", + "dev": true, + "dependencies": { + "@jest/types": "^27.5.1", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^27.5.1", + "jest-pnp-resolver": "^1.2.2", + "jest-util": "^27.5.1", + "jest-validate": "^27.5.1", + "resolve": "^1.20.0", + "resolve.exports": "^1.1.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-resolve-dependencies": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-resolve-dependencies/-/jest-resolve-dependencies-27.5.1.tgz", + "integrity": "sha512-QQOOdY4PE39iawDn5rzbIePNigfe5B9Z91GDD1ae/xNDlu9kaat8QQ5EKnNmVWPV54hUdxCVwwj6YMgR2O7IOg==", + "dev": true, + "dependencies": { + "@jest/types": "^27.5.1", + "jest-regex-util": "^27.5.1", + "jest-snapshot": "^27.5.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-resolve/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-resolve/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/jest-resolve/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/jest-resolve/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/jest-resolve/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-resolve/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": 
"sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-runner": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-runner/-/jest-runner-27.5.1.tgz", + "integrity": "sha512-g4NPsM4mFCOwFKXO4p/H/kWGdJp9V8kURY2lX8Me2drgXqG7rrZAx5kv+5H7wtt/cdFIjhqYx1HrlqWHaOvDaQ==", + "dev": true, + "dependencies": { + "@jest/console": "^27.5.1", + "@jest/environment": "^27.5.1", + "@jest/test-result": "^27.5.1", + "@jest/transform": "^27.5.1", + "@jest/types": "^27.5.1", + "@types/node": "*", + "chalk": "^4.0.0", + "emittery": "^0.8.1", + "graceful-fs": "^4.2.9", + "jest-docblock": "^27.5.1", + "jest-environment-jsdom": "^27.5.1", + "jest-environment-node": "^27.5.1", + "jest-haste-map": "^27.5.1", + "jest-leak-detector": "^27.5.1", + "jest-message-util": "^27.5.1", + "jest-resolve": "^27.5.1", + "jest-runtime": "^27.5.1", + "jest-util": "^27.5.1", + "jest-worker": "^27.5.1", + "source-map-support": "^0.5.6", + "throat": "^6.0.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-runner/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-runner/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/jest-runner/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/jest-runner/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/jest-runner/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-runner/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + 
"node_modules/jest-runtime": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-runtime/-/jest-runtime-27.5.1.tgz", + "integrity": "sha512-o7gxw3Gf+H2IGt8fv0RiyE1+r83FJBRruoA+FXrlHw6xEyBsU8ugA6IPfTdVyA0w8HClpbK+DGJxH59UrNMx8A==", + "dev": true, + "dependencies": { + "@jest/environment": "^27.5.1", + "@jest/fake-timers": "^27.5.1", + "@jest/globals": "^27.5.1", + "@jest/source-map": "^27.5.1", + "@jest/test-result": "^27.5.1", + "@jest/transform": "^27.5.1", + "@jest/types": "^27.5.1", + "chalk": "^4.0.0", + "cjs-module-lexer": "^1.0.0", + "collect-v8-coverage": "^1.0.0", + "execa": "^5.0.0", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^27.5.1", + "jest-message-util": "^27.5.1", + "jest-mock": "^27.5.1", + "jest-regex-util": "^27.5.1", + "jest-resolve": "^27.5.1", + "jest-snapshot": "^27.5.1", + "jest-util": "^27.5.1", + "slash": "^3.0.0", + "strip-bom": "^4.0.0" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-runtime/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-runtime/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/jest-runtime/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/jest-runtime/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/jest-runtime/node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmmirror.com/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/jest-runtime/node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/get-stream/-/get-stream-6.0.1.tgz", + "integrity": 
"sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/jest-runtime/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-runtime/node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/jest-runtime/node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmmirror.com/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-runtime/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-serializer": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-serializer/-/jest-serializer-27.5.1.tgz", + "integrity": "sha512-jZCyo6iIxO1aqUxpuBlwTDMkzOAJS4a3eYz3YzgxxVQFwLeSA7Jfq5cbqCY+JLvTDrWirgusI/0KwxKMgrdf7w==", + "dev": true, + "dependencies": { + "@types/node": "*", + "graceful-fs": "^4.2.9" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-serializer-vue": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/jest-serializer-vue/-/jest-serializer-vue-2.0.2.tgz", + "integrity": "sha512-nK/YIFo6qe3i9Ge+hr3h4PpRehuPPGZFt8LDBdTHYldMb7ZWlkanZS8Ls7D8h6qmQP2lBQVDLP0DKn5bJ9QApQ==", + "dev": true, + "dependencies": { + "pretty": "2.0.0" + } + }, + "node_modules/jest-snapshot": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-snapshot/-/jest-snapshot-27.5.1.tgz", + "integrity": "sha512-yYykXI5a0I31xX67mgeLw1DZ0bJB+gpq5IpSuCAoyDi0+BhgU/RIrL+RTzDmkNTchvDFWKP8lp+w/42Z3us5sA==", + "dev": true, + "dependencies": { + "@babel/core": "^7.7.2", + "@babel/generator": "^7.7.2", + "@babel/plugin-syntax-typescript": "^7.7.2", + "@babel/traverse": "^7.7.2", + "@babel/types": "^7.0.0", + "@jest/transform": "^27.5.1", + "@jest/types": "^27.5.1", + "@types/babel__traverse": "^7.0.4", + "@types/prettier": "^2.1.5", + "babel-preset-current-node-syntax": "^1.0.0", + "chalk": "^4.0.0", + "expect": "^27.5.1", + "graceful-fs": "^4.2.9", + "jest-diff": "^27.5.1", + "jest-get-type": "^27.5.1", + "jest-haste-map": "^27.5.1", + "jest-matcher-utils": "^27.5.1", + "jest-message-util": "^27.5.1", + "jest-util": "^27.5.1", + "natural-compare": "^1.4.0", + "pretty-format": "^27.5.1", + "semver": "^7.3.2" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + 
}, + "node_modules/jest-snapshot/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-snapshot/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/jest-snapshot/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/jest-snapshot/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/jest-snapshot/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-snapshot/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/jest-snapshot/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-transform-stub": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/jest-transform-stub/-/jest-transform-stub-2.0.0.tgz", + "integrity": "sha512-lspHaCRx/mBbnm3h4uMMS3R5aZzMwyNpNIJLXj4cEsV0mIUtS4IjYJLSoyjRCtnxb6RIGJ4NL2quZzfIeNhbkg==", + "dev": true + }, + "node_modules/jest-util": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-util/-/jest-util-27.5.1.tgz", + "integrity": "sha512-Kv2o/8jNvX1MQ0KGtw480E/w4fBCDOnH6+6DmeKi6LZUIlKA5kwY0YNdlzaWTiVgxqAqik11QyxDOKk543aKXw==", + "dev": true, + "dependencies": { + "@jest/types": "^27.5.1", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + 
"node_modules/jest-util/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-util/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/jest-util/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/jest-util/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/jest-util/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-util/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-validate": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-validate/-/jest-validate-27.5.1.tgz", + "integrity": "sha512-thkNli0LYTmOI1tDB3FI1S1RTp/Bqyd9pTarJwL87OIBFuqEb5Apv5EaApEudYg4g86e3CT6kM0RowkhtEnCBQ==", + "dev": true, + "dependencies": { + "@jest/types": "^27.5.1", + "camelcase": "^6.2.0", + "chalk": "^4.0.0", + "jest-get-type": "^27.5.1", + "leven": "^3.1.0", + "pretty-format": "^27.5.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-validate/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-validate/node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmmirror.com/camelcase/-/camelcase-6.3.0.tgz", + "integrity": 
"sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/jest-validate/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/jest-validate/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/jest-validate/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/jest-validate/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-validate/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-watch-typeahead": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/jest-watch-typeahead/-/jest-watch-typeahead-1.1.0.tgz", + "integrity": "sha512-Va5nLSJTN7YFtC2jd+7wsoe1pNe5K4ShLux/E5iHEwlB9AxaxmggY7to9KUqKojhaJw3aXqt5WAb4jGPOolpEw==", + "dev": true, + "dependencies": { + "ansi-escapes": "^4.3.1", + "chalk": "^4.0.0", + "jest-regex-util": "^28.0.0", + "jest-watcher": "^28.0.0", + "slash": "^4.0.0", + "string-length": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "jest": "^27.0.0 || ^28.0.0" + } + }, + "node_modules/jest-watch-typeahead/node_modules/@jest/console": { + "version": "28.1.3", + "resolved": "https://registry.npmmirror.com/@jest/console/-/console-28.1.3.tgz", + "integrity": "sha512-QPAkP5EwKdK/bxIr6C1I4Vs0rm2nHiANzj/Z5X2JQkrZo6IqvC4ldZ9K95tF0HdidhA8Bo6egxSzUFPYKcEXLw==", + "dev": true, + "dependencies": { + "@jest/types": "^28.1.3", + "@types/node": "*", + "chalk": "^4.0.0", + "jest-message-util": "^28.1.3", + "jest-util": "^28.1.3", + "slash": "^3.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || ^16.10.0 || >=17.0.0" + } + }, + "node_modules/jest-watch-typeahead/node_modules/@jest/console/node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/slash/-/slash-3.0.0.tgz", + "integrity": 
"sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-watch-typeahead/node_modules/@jest/test-result": { + "version": "28.1.3", + "resolved": "https://registry.npmmirror.com/@jest/test-result/-/test-result-28.1.3.tgz", + "integrity": "sha512-kZAkxnSE+FqE8YjW8gNuoVkkC9I7S1qmenl8sGcDOLropASP+BkcGKwhXoyqQuGOGeYY0y/ixjrd/iERpEXHNg==", + "dev": true, + "dependencies": { + "@jest/console": "^28.1.3", + "@jest/types": "^28.1.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "collect-v8-coverage": "^1.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || ^16.10.0 || >=17.0.0" + } + }, + "node_modules/jest-watch-typeahead/node_modules/@jest/types": { + "version": "28.1.3", + "resolved": "https://registry.npmmirror.com/@jest/types/-/types-28.1.3.tgz", + "integrity": "sha512-RyjiyMUZrKz/c+zlMFO1pm70DcIlST8AeWTkoUdZevew44wcNZQHsEVOiCVtgVnlFFD82FPaXycys58cf2muVQ==", + "dev": true, + "dependencies": { + "@jest/schemas": "^28.1.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || ^16.10.0 || >=17.0.0" + } + }, + "node_modules/jest-watch-typeahead/node_modules/@types/yargs": { + "version": "17.0.32", + "resolved": "https://registry.npmmirror.com/@types/yargs/-/yargs-17.0.32.tgz", + "integrity": "sha512-xQ67Yc/laOG5uMfX/093MRlGGCIBzZMarVa+gfNKJxWAIgykYpVGkBdbqEzGDDfCrVUj6Hiff4mTZ5BA6TmAog==", + "dev": true, + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/jest-watch-typeahead/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-watch-typeahead/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/jest-watch-typeahead/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/jest-watch-typeahead/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/jest-watch-typeahead/node_modules/emittery": { + "version": "0.10.2", + "resolved": "https://registry.npmmirror.com/emittery/-/emittery-0.10.2.tgz", + "integrity": 
"sha512-aITqOwnLanpHLNXZJENbOgjUBeHocD+xsSJmNrjovKBW5HbSpW3d1pEls7GFQPUWXiwG9+0P4GtHfEqC/4M0Iw==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sindresorhus/emittery?sponsor=1" + } + }, + "node_modules/jest-watch-typeahead/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-watch-typeahead/node_modules/jest-message-util": { + "version": "28.1.3", + "resolved": "https://registry.npmmirror.com/jest-message-util/-/jest-message-util-28.1.3.tgz", + "integrity": "sha512-PFdn9Iewbt575zKPf1286Ht9EPoJmYT7P0kY+RibeYZ2XtOr53pDLEFoTWXbd1h4JiGiWpTBC84fc8xMXQMb7g==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@jest/types": "^28.1.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^28.1.3", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || ^16.10.0 || >=17.0.0" + } + }, + "node_modules/jest-watch-typeahead/node_modules/jest-message-util/node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-watch-typeahead/node_modules/jest-regex-util": { + "version": "28.0.2", + "resolved": "https://registry.npmmirror.com/jest-regex-util/-/jest-regex-util-28.0.2.tgz", + "integrity": "sha512-4s0IgyNIy0y9FK+cjoVYoxamT7Zeo7MhzqRGx7YDYmaQn1wucY9rotiGkBzzcMXTtjrCAP/f7f+E0F7+fxPNdw==", + "dev": true, + "engines": { + "node": "^12.13.0 || ^14.15.0 || ^16.10.0 || >=17.0.0" + } + }, + "node_modules/jest-watch-typeahead/node_modules/jest-util": { + "version": "28.1.3", + "resolved": "https://registry.npmmirror.com/jest-util/-/jest-util-28.1.3.tgz", + "integrity": "sha512-XdqfpHwpcSRko/C35uLYFM2emRAltIIKZiJ9eAmhjsj0CqZMa0p1ib0R5fWIqGhn1a103DebTbpqIaP1qCQ6tQ==", + "dev": true, + "dependencies": { + "@jest/types": "^28.1.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || ^16.10.0 || >=17.0.0" + } + }, + "node_modules/jest-watch-typeahead/node_modules/jest-watcher": { + "version": "28.1.3", + "resolved": "https://registry.npmmirror.com/jest-watcher/-/jest-watcher-28.1.3.tgz", + "integrity": "sha512-t4qcqj9hze+jviFPUN3YAtAEeFnr/azITXQEMARf5cMwKY2SMBRnCQTXLixTl20OR6mLh9KLMrgVJgJISym+1g==", + "dev": true, + "dependencies": { + "@jest/test-result": "^28.1.3", + "@jest/types": "^28.1.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "emittery": "^0.10.2", + "jest-util": "^28.1.3", + "string-length": "^4.0.1" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || ^16.10.0 || >=17.0.0" + } + }, + "node_modules/jest-watch-typeahead/node_modules/jest-watcher/node_modules/string-length": { + "version": "4.0.2", + "resolved": "https://registry.npmmirror.com/string-length/-/string-length-4.0.2.tgz", + "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", + "dev": true, + "dependencies": { + "char-regex": "^1.0.2", + 
"strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/jest-watch-typeahead/node_modules/jest-watcher/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-watch-typeahead/node_modules/pretty-format": { + "version": "28.1.3", + "resolved": "https://registry.npmmirror.com/pretty-format/-/pretty-format-28.1.3.tgz", + "integrity": "sha512-8gFb/To0OmxHR9+ZTb14Df2vNxdGCX8g1xWGUTqUw5TiZvcQf5sHKObd5UcPyLLyowNwDAMTF3XWOG1B6mxl1Q==", + "dev": true, + "dependencies": { + "@jest/schemas": "^28.1.3", + "ansi-regex": "^5.0.1", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || ^16.10.0 || >=17.0.0" + } + }, + "node_modules/jest-watch-typeahead/node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-watch-typeahead/node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmmirror.com/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true + }, + "node_modules/jest-watch-typeahead/node_modules/slash": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/slash/-/slash-4.0.0.tgz", + "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/jest-watch-typeahead/node_modules/string-length": { + "version": "5.0.1", + "resolved": "https://registry.npmmirror.com/string-length/-/string-length-5.0.1.tgz", + "integrity": "sha512-9Ep08KAMUn0OadnVaBuRdE2l615CQ508kr0XMadjClfYpdCyvrbFp6Taebo8yyxokQ4viUd/xPPUA4FGgUa0ow==", + "dev": true, + "dependencies": { + "char-regex": "^2.0.0", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/jest-watch-typeahead/node_modules/string-length/node_modules/char-regex": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/char-regex/-/char-regex-2.0.1.tgz", + "integrity": "sha512-oSvEeo6ZUD7NepqAat3RqoucZ5SeqLJgOvVIwkafu6IP3V0pO38s/ypdVUmDDK6qIIHNlYHJAKX9E7R7HoKElw==", + "dev": true, + "engines": { + "node": ">=12.20" + } + }, + "node_modules/jest-watch-typeahead/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + 
"node_modules/jest-watch-typeahead/node_modules/strip-ansi/node_modules/ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/jest-watch-typeahead/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-watcher": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-watcher/-/jest-watcher-27.5.1.tgz", + "integrity": "sha512-z676SuD6Z8o8qbmEGhoEUFOM1+jfEiL3DXHK/xgEiG2EyNYfFG60jluWcupY6dATjfEsKQuibReS1djInQnoVw==", + "dev": true, + "dependencies": { + "@jest/test-result": "^27.5.1", + "@jest/types": "^27.5.1", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "jest-util": "^27.5.1", + "string-length": "^4.0.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/jest-watcher/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-watcher/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/jest-watcher/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/jest-watcher/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/jest-watcher/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-watcher/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": 
"sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-worker": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/jest-worker/-/jest-worker-27.5.1.tgz", + "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", + "dependencies": { + "@types/node": "*", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/jest-worker/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/joi": { + "version": "17.13.1", + "resolved": "https://registry.npmmirror.com/joi/-/joi-17.13.1.tgz", + "integrity": "sha512-vaBlIKCyo4FCUtCm7Eu4QZd/q02bWcxfUO6YSXAZOWF6gzcLBeba8kwotUdYJjDLW8Cz8RywsSOqiNJZW0mNvg==", + "dependencies": { + "@hapi/hoek": "^9.3.0", + "@hapi/topo": "^5.1.0", + "@sideway/address": "^4.1.5", + "@sideway/formula": "^3.0.1", + "@sideway/pinpoint": "^2.0.0" + } + }, + "node_modules/js-beautify": { + "version": "1.15.1", + "resolved": "https://registry.npmmirror.com/js-beautify/-/js-beautify-1.15.1.tgz", + "integrity": "sha512-ESjNzSlt/sWE8sciZH8kBF8BPlwXPwhR6pWKAw8bw4Bwj+iZcnKW6ONWUutJ7eObuBZQpiIb8S7OYspWrKt7rA==", + "dev": true, + "dependencies": { + "config-chain": "^1.1.13", + "editorconfig": "^1.0.4", + "glob": "^10.3.3", + "js-cookie": "^3.0.5", + "nopt": "^7.2.0" + }, + "bin": { + "css-beautify": "js/bin/css-beautify.js", + "html-beautify": "js/bin/html-beautify.js", + "js-beautify": "js/bin/js-beautify.js" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/js-beautify/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/js-beautify/node_modules/glob": { + "version": "10.3.15", + "resolved": "https://registry.npmmirror.com/glob/-/glob-10.3.15.tgz", + "integrity": "sha512-0c6RlJt1TICLyvJYIApxb8GsXoai0KUP7AxKKAtsYXdgJR1mGEUa7DgwShbdk1nly0PYoZj01xd4hzbq3fsjpw==", + "dev": true, + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^2.3.6", + "minimatch": "^9.0.1", + "minipass": "^7.0.4", + "path-scurry": "^1.11.0" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/js-beautify/node_modules/minimatch": { + "version": "9.0.4", + "resolved": "https://registry.npmmirror.com/minimatch/-/minimatch-9.0.4.tgz", + "integrity": 
"sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/js-beautify/node_modules/minipass": { + "version": "7.1.1", + "resolved": "https://registry.npmmirror.com/minipass/-/minipass-7.1.1.tgz", + "integrity": "sha512-UZ7eQ+h8ywIRAW1hIEl2AqdwzJucU/Kp59+8kkZeSvafXhZjul247BvIJjEVFVeON6d7lM46XX1HXCduKAS8VA==", + "dev": true, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/js-cookie": { + "version": "3.0.5", + "resolved": "https://registry.npmmirror.com/js-cookie/-/js-cookie-3.0.5.tgz", + "integrity": "sha512-cEiJEAEoIbWfCZYKWhVwFuvPX1gETRYPw6LlaTKoxD3s2AkXzkCjnp6h0V77ozyqj0jakteJ4YqDJT830+lVGw==", + "dev": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/js-message": { + "version": "1.0.7", + "resolved": "https://registry.npmmirror.com/js-message/-/js-message-1.0.7.tgz", + "integrity": "sha512-efJLHhLjIyKRewNS9EGZ4UpI8NguuL6fKkhRxVuMmrGV2xN/0APGdQYwLFky5w9naebSZ0OwAGp0G6/2Cg90rA==", + "engines": { + "node": ">=0.6.0" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmmirror.com/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jscodeshift": { + "version": "0.11.0", + "resolved": "https://registry.npmmirror.com/jscodeshift/-/jscodeshift-0.11.0.tgz", + "integrity": "sha512-SdRK2C7jjs4k/kT2mwtO07KJN9RnjxtKn03d9JVj6c3j9WwaLcFYsICYDnLAzY0hp+wG2nxl+Cm2jWLiNVYb8g==", + "dependencies": { + "@babel/core": "^7.1.6", + "@babel/parser": "^7.1.6", + "@babel/plugin-proposal-class-properties": "^7.1.0", + "@babel/plugin-proposal-nullish-coalescing-operator": "^7.1.0", + "@babel/plugin-proposal-optional-chaining": "^7.1.0", + "@babel/plugin-transform-modules-commonjs": "^7.1.0", + "@babel/preset-flow": "^7.0.0", + "@babel/preset-typescript": "^7.1.0", + "@babel/register": "^7.0.0", + "babel-core": "^7.0.0-bridge.0", + "colors": "^1.1.2", + "flow-parser": "0.*", + "graceful-fs": "^4.2.4", + "micromatch": "^3.1.10", + "neo-async": "^2.5.0", + "node-dir": "^0.1.17", + "recast": "^0.20.3", + "temp": "^0.8.1", + "write-file-atomic": "^2.3.0" + }, + "bin": { + "jscodeshift": "bin/jscodeshift.js" + }, + "peerDependencies": { + "@babel/preset-env": "^7.1.6" + } + }, + "node_modules/jscodeshift/node_modules/braces": { + "version": "2.3.2", + "resolved": "https://registry.npmmirror.com/braces/-/braces-2.3.2.tgz", + "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", + "dependencies": { + "arr-flatten": "^1.1.0", + "array-unique": "^0.3.2", + "extend-shallow": "^2.0.1", + "fill-range": "^4.0.0", + "isobject": "^3.0.1", + "repeat-element": "^1.1.2", + "snapdragon": "^0.8.1", + "snapdragon-node": "^2.0.1", + "split-string": "^3.0.2", + "to-regex": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jscodeshift/node_modules/fill-range": { + "version": "4.0.0", + 
"resolved": "https://registry.npmmirror.com/fill-range/-/fill-range-4.0.0.tgz", + "integrity": "sha512-VcpLTWqWDiTerugjj8e3+esbg+skS3M9e54UuR3iCeIDMXCLTsAH8hTSzDQU/X6/6t3eYkOKoZSef2PlU6U1XQ==", + "dependencies": { + "extend-shallow": "^2.0.1", + "is-number": "^3.0.0", + "repeat-string": "^1.6.1", + "to-regex-range": "^2.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jscodeshift/node_modules/is-extendable": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/is-extendable/-/is-extendable-1.0.1.tgz", + "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "dependencies": { + "is-plain-object": "^2.0.4" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jscodeshift/node_modules/is-number": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/is-number/-/is-number-3.0.0.tgz", + "integrity": "sha512-4cboCqIpliH+mAvFNegjZQ4kgKc3ZUhQVr3HvWbSh5q3WH2v82ct+T2Y1hdU5Gdtorx/cLifQjqCbL7bpznLTg==", + "dependencies": { + "kind-of": "^3.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jscodeshift/node_modules/is-number/node_modules/kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmmirror.com/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "dependencies": { + "is-buffer": "^1.1.5" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jscodeshift/node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmmirror.com/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jscodeshift/node_modules/micromatch": { + "version": "3.1.10", + "resolved": "https://registry.npmmirror.com/micromatch/-/micromatch-3.1.10.tgz", + "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", + "dependencies": { + "arr-diff": "^4.0.0", + "array-unique": "^0.3.2", + "braces": "^2.3.1", + "define-property": "^2.0.2", + "extend-shallow": "^3.0.2", + "extglob": "^2.0.4", + "fragment-cache": "^0.2.1", + "kind-of": "^6.0.2", + "nanomatch": "^1.2.9", + "object.pick": "^1.3.0", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jscodeshift/node_modules/micromatch/node_modules/extend-shallow": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/extend-shallow/-/extend-shallow-3.0.2.tgz", + "integrity": "sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==", + "dependencies": { + "assign-symbols": "^1.0.0", + "is-extendable": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jscodeshift/node_modules/to-regex-range": { + "version": "2.1.1", + "resolved": "https://registry.npmmirror.com/to-regex-range/-/to-regex-range-2.1.1.tgz", + "integrity": "sha512-ZZWNfCjUokXXDGXFpZehJIkZqq91BcULFq/Pi7M5i4JnxXdhMKAK682z8bCW3o8Hj1wuuzoKcW3DfVzaP6VuNg==", + "dependencies": { + "is-number": "^3.0.0", + "repeat-string": "^1.6.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jscodeshift/node_modules/write-file-atomic": { + "version": "2.4.3", + "resolved": "https://registry.npmmirror.com/write-file-atomic/-/write-file-atomic-2.4.3.tgz", + "integrity": 
"sha512-GaETH5wwsX+GcnzhPgKcKjJ6M2Cq3/iZp1WyY/X1CSqrW+jVNM9Y7D8EC2sM4ZG/V8wZlSniJnCKWPmBYAucRQ==", + "dependencies": { + "graceful-fs": "^4.1.11", + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.2" + } + }, + "node_modules/jsdom": { + "version": "16.7.0", + "resolved": "https://registry.npmmirror.com/jsdom/-/jsdom-16.7.0.tgz", + "integrity": "sha512-u9Smc2G1USStM+s/x1ru5Sxrl6mPYCbByG1U/hUmqaVsm4tbNyS7CicOSRyuGQYZhTu0h84qkZZQ/I+dzizSVw==", + "dev": true, + "dependencies": { + "abab": "^2.0.5", + "acorn": "^8.2.4", + "acorn-globals": "^6.0.0", + "cssom": "^0.4.4", + "cssstyle": "^2.3.0", + "data-urls": "^2.0.0", + "decimal.js": "^10.2.1", + "domexception": "^2.0.1", + "escodegen": "^2.0.0", + "form-data": "^3.0.0", + "html-encoding-sniffer": "^2.0.1", + "http-proxy-agent": "^4.0.1", + "https-proxy-agent": "^5.0.0", + "is-potential-custom-element-name": "^1.0.1", + "nwsapi": "^2.2.0", + "parse5": "6.0.1", + "saxes": "^5.0.1", + "symbol-tree": "^3.2.4", + "tough-cookie": "^4.0.0", + "w3c-hr-time": "^1.0.2", + "w3c-xmlserializer": "^2.0.0", + "webidl-conversions": "^6.1.0", + "whatwg-encoding": "^1.0.5", + "whatwg-mimetype": "^2.3.0", + "whatwg-url": "^8.5.0", + "ws": "^7.4.6", + "xml-name-validator": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "canvas": "^2.5.0" + }, + "peerDependenciesMeta": { + "canvas": { + "optional": true + } + } + }, + "node_modules/jsdom/node_modules/parse5": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/parse5/-/parse5-6.0.1.tgz", + "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==", + "dev": true + }, + "node_modules/jsdom/node_modules/whatwg-mimetype": { + "version": "2.3.0", + "resolved": "https://registry.npmmirror.com/whatwg-mimetype/-/whatwg-mimetype-2.3.0.tgz", + "integrity": "sha512-M4yMwr6mAnQz76TbJm914+gPpB/nCwvZbJU28cUD6dR004SAxDLOOSUaB1JDRqLtaOV/vi0IC5lEAGFgrjGv/g==", + "dev": true + }, + "node_modules/jsesc": { + "version": "2.5.2", + "resolved": "https://registry.npmmirror.com/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true + }, + "node_modules/json-parse-better-errors": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", + "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==" + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmmirror.com/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmmirror.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": 
"https://registry.npmmirror.com/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmmirror.com/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmmirror.com/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmmirror.com/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmmirror.com/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "dependencies": { + "is-buffer": "^1.1.5" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmmirror.com/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/klona": { + "version": "2.0.6", + "resolved": "https://registry.npmmirror.com/klona/-/klona-2.0.6.tgz", + "integrity": "sha512-dhG34DXATL5hSxJbIexCft8FChFXtmskoZYnoPWjXQuebWYCNkVeV3KkGegCK9CP1oswI/vQibS2GY7Em/sJJA==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/launch-editor": { + "version": "2.6.1", + "resolved": "https://registry.npmmirror.com/launch-editor/-/launch-editor-2.6.1.tgz", + "integrity": "sha512-eB/uXmFVpY4zezmGp5XtU21kwo7GBbKB+EQ+UZeWtGb9yAM5xt/Evk+lYH3eRNAtId+ej4u7TYPFZ07w4s7rRw==", + "dependencies": { + "picocolors": "^1.0.0", + "shell-quote": "^1.8.1" + } + }, + "node_modules/launch-editor-middleware": { + "version": "2.6.1", + "resolved": "https://registry.npmmirror.com/launch-editor-middleware/-/launch-editor-middleware-2.6.1.tgz", + "integrity": "sha512-Fg/xYhf7ARmRp40n18wIfJyuAMEjXo67Yull7uF7d0OJ3qA4EYJISt1XfPPn69IIJ5jKgQwzcg6DqHYo95LL/g==", + "dev": true, + "dependencies": { + "launch-editor": "^2.6.1" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "engines": { + "node": ">=6" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmmirror.com/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lilconfig": { + "version": "2.1.0", + "resolved": 
"https://registry.npmmirror.com/lilconfig/-/lilconfig-2.1.0.tgz", + "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmmirror.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==" + }, + "node_modules/loader-runner": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/loader-runner/-/loader-runner-4.3.0.tgz", + "integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==", + "engines": { + "node": ">=6.11.5" + } + }, + "node_modules/loader-utils": { + "version": "1.4.2", + "resolved": "https://registry.npmmirror.com/loader-utils/-/loader-utils-1.4.2.tgz", + "integrity": "sha512-I5d00Pd/jwMD2QCduo657+YM/6L3KZu++pmX9VFncxaxvHcru9jx1lBaFft+r4Mt2jK0Yhp41XlRAihzPxHNCg==", + "dependencies": { + "big.js": "^5.2.2", + "emojis-list": "^3.0.0", + "json5": "^1.0.1" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/loader-utils/node_modules/json5": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/json5/-/json5-1.0.2.tgz", + "integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==", + "dependencies": { + "minimist": "^1.2.0" + }, + "bin": { + "json5": "lib/cli.js" + } + }, + "node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmmirror.com/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "node_modules/lodash-es": { + "version": "4.17.21", + "resolved": "https://registry.npmmirror.com/lodash-es/-/lodash-es-4.17.21.tgz", + "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==" + }, + "node_modules/lodash-unified": { + "version": "1.0.3", + "resolved": "https://registry.npmmirror.com/lodash-unified/-/lodash-unified-1.0.3.tgz", + "integrity": "sha512-WK9qSozxXOD7ZJQlpSqOT+om2ZfcT4yO+03FuzAHD0wF6S0l0090LRPDx3vhTTLZ8cFKpBn+IOcVXK6qOcIlfQ==", + "peerDependencies": { + "@types/lodash-es": "*", + "lodash": "*", + "lodash-es": "*" + } + }, + "node_modules/lodash.clonedeep": { + "version": "4.5.0", + "resolved": "https://registry.npmmirror.com/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz", + "integrity": "sha512-H5ZhCF25riFd9uB5UCkVKo61m3S/xZk1x4wA6yp/L3RFP6Z/eHH1ymQcGLo7J3GMPfm0V/7m1tryHuGVxpqEBQ==" + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmmirror.com/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" + }, + "node_modules/lodash.defaultsdeep": { + "version": "4.6.1", + "resolved": "https://registry.npmmirror.com/lodash.defaultsdeep/-/lodash.defaultsdeep-4.6.1.tgz", + "integrity": 
"sha512-3j8wdDzYuWO3lM3Reg03MuQR957t287Rpcxp1njpEa8oDrikb+FwGdW3n+FELh/A6qib6yPit0j/pv9G/yeAqA==", + "dev": true + }, + "node_modules/lodash.mapvalues": { + "version": "4.6.0", + "resolved": "https://registry.npmmirror.com/lodash.mapvalues/-/lodash.mapvalues-4.6.0.tgz", + "integrity": "sha512-JPFqXFeZQ7BfS00H58kClY7SPVeHertPE0lNuCyZ26/XlN8TvakYD7b9bGyNmXbT/D3BbtPAAmq90gPWqLkxlQ==", + "dev": true + }, + "node_modules/lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==", + "dev": true + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmmirror.com/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==" + }, + "node_modules/lodash.sortby": { + "version": "4.7.0", + "resolved": "https://registry.npmmirror.com/lodash.sortby/-/lodash.sortby-4.7.0.tgz", + "integrity": "sha512-HDWXG8isMntAyRF5vZ7xKuEvOhT4AhlRt/3czTSjvGUxjYCBVRQY48ViDHyfYz9VIoBkW4TMGQNapx+l3RUwdA==" + }, + "node_modules/lodash.truncate": { + "version": "4.4.2", + "resolved": "https://registry.npmmirror.com/lodash.truncate/-/lodash.truncate-4.4.2.tgz", + "integrity": "sha512-jttmRe7bRse52OsWIMDLaXxWqRAmtIUccAQ3garviCqJjafXOfNMO0yMfNpdD6zbGaTU0P5Nz7e7gAT6cKmJRw==", + "dev": true + }, + "node_modules/lodash.uniq": { + "version": "4.5.0", + "resolved": "https://registry.npmmirror.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz", + "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==", + "dev": true + }, + "node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmmirror.com/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-symbols/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/log-symbols/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/log-symbols/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/log-symbols/node_modules/color-name": { + "version": 
"1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/log-symbols/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/log-symbols/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/log-update": { + "version": "2.3.0", + "resolved": "https://registry.npmmirror.com/log-update/-/log-update-2.3.0.tgz", + "integrity": "sha512-vlP11XfFGyeNQlmEn9tJ66rEW1coA/79m5z6BCkudjbAGE83uhAcGYrBFwfs3AdLiLzGRusRPAbSPK9xZteCmg==", + "dev": true, + "dependencies": { + "ansi-escapes": "^3.0.0", + "cli-cursor": "^2.0.0", + "wrap-ansi": "^3.0.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/log-update/node_modules/ansi-escapes": { + "version": "3.2.0", + "resolved": "https://registry.npmmirror.com/ansi-escapes/-/ansi-escapes-3.2.0.tgz", + "integrity": "sha512-cBhpre4ma+U0T1oM5fXg7Dy1Jw7zzwv7lt/GoCpr+hDQJoYnKVPLL4dCvSEFMmQurOQvSrwT7SL/DAlhBI97RQ==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/log-update/node_modules/ansi-regex": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/ansi-regex/-/ansi-regex-3.0.1.tgz", + "integrity": "sha512-+O9Jct8wf++lXxxFc4hc8LsjaSq0HFzzL7cVsw8pRDIPdjKD2mT4ytDZlLuSBZ4cLKZFXIrMGO7DbQCtMJJMKw==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/log-update/node_modules/cli-cursor": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/cli-cursor/-/cli-cursor-2.1.0.tgz", + "integrity": "sha512-8lgKz8LmCRYZZQDpRyT2m5rKJ08TnU4tR9FFFW2rxpxR1FzWi4PQ/NfyODchAatHaUgnSPVcx/R5w6NuTBzFiw==", + "dev": true, + "dependencies": { + "restore-cursor": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/log-update/node_modules/is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha512-VHskAKYM8RfSFXwee5t5cbN5PZeq1Wrh6qd5bkyiXIf6UQcN6w/A0eXM9r6t8d+GYOh+o6ZhiEnb88LN/Y8m2w==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/log-update/node_modules/mimic-fn": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/mimic-fn/-/mimic-fn-1.2.0.tgz", + "integrity": "sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/log-update/node_modules/onetime": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/onetime/-/onetime-2.0.1.tgz", + "integrity": "sha512-oyyPpiMaKARvvcgip+JV+7zci5L8D1W9RZIz2l1o08AM3pfspitVWnPt3mzHcBPp12oYMTy0pqrFs/C+m3EwsQ==", + "dev": true, + "dependencies": { + "mimic-fn": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/log-update/node_modules/restore-cursor": { + "version": "2.0.0", + "resolved": 
"https://registry.npmmirror.com/restore-cursor/-/restore-cursor-2.0.0.tgz", + "integrity": "sha512-6IzJLuGi4+R14vwagDHX+JrXmPVtPpn4mffDJ1UdR7/Edm87fl6yi8mMBIVvFtJaNTUvjughmW4hwLhRG7gC1Q==", + "dev": true, + "dependencies": { + "onetime": "^2.0.0", + "signal-exit": "^3.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/log-update/node_modules/string-width": { + "version": "2.1.1", + "resolved": "https://registry.npmmirror.com/string-width/-/string-width-2.1.1.tgz", + "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", + "dev": true, + "dependencies": { + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^4.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/log-update/node_modules/strip-ansi": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-4.0.0.tgz", + "integrity": "sha512-4XaJ2zQdCzROZDivEVIDPkcQn8LMFSa8kj8Gxb/Lnwzv9A8VctNZ+lfivC/sV3ivW8ElJTERXZoPBRrZKkNKow==", + "dev": true, + "dependencies": { + "ansi-regex": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/log-update/node_modules/wrap-ansi": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/wrap-ansi/-/wrap-ansi-3.0.1.tgz", + "integrity": "sha512-iXR3tDXpbnTpzjKSylUJRkLuOrEC7hwEB221cgn6wtF8wpmz28puFXAEfPT5zrjM3wahygB//VuWEr1vTkDcNQ==", + "dev": true, + "dependencies": { + "string-width": "^2.1.1", + "strip-ansi": "^4.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/loglevel": { + "version": "1.9.1", + "resolved": "https://registry.npmmirror.com/loglevel/-/loglevel-1.9.1.tgz", + "integrity": "sha512-hP3I3kCrDIMuRwAwHltphhDM1r8i55H33GgqjXbrisuJhF4kRhW1dNuxsRklp4bXl8DSdLaNLuiL4A/LWRfxvg==", + "engines": { + "node": ">= 0.6.0" + }, + "funding": { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/loglevel" + } + }, + "node_modules/long": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/long/-/long-4.0.0.tgz", + "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmmirror.com/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lowdb": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/lowdb/-/lowdb-1.0.0.tgz", + "integrity": "sha512-2+x8esE/Wb9SQ1F9IHaYWfsC9FIecLOPrK4g17FGEayjUWH172H6nwicRovGvSE2CPZouc2MCIqCI7h9d+GftQ==", + "dependencies": { + "graceful-fs": "^4.1.3", + "is-promise": "^2.1.0", + "lodash": "4", + "pify": "^3.0.0", + "steno": "^0.4.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/lower-case": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/lower-case/-/lower-case-2.0.2.tgz", + "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==", + "dev": true, + "dependencies": { + "tslib": "^2.0.3" + } + }, + "node_modules/lowercase-keys": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/lowercase-keys/-/lowercase-keys-1.0.1.tgz", + "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==", + "engines": { + "node": ">=0.10.0" + } + }, + 
"node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmmirror.com/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/magic-string": { + "version": "0.30.10", + "resolved": "https://registry.npmmirror.com/magic-string/-/magic-string-0.30.10.tgz", + "integrity": "sha512-iIRwTIf0QKV3UAnYK4PU8uiEc4SRh5jX0mwpIwETPpHdhVM4f53RSwS/vXvN1JhGX+Cs7B8qIq3d6AH49O5fAQ==", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.4.15" + } + }, + "node_modules/make-dir": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/make-dir/-/make-dir-3.1.0.tgz", + "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", + "devOptional": true, + "dependencies": { + "semver": "^6.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-error": { + "version": "1.3.6", + "resolved": "https://registry.npmmirror.com/make-error/-/make-error-1.3.6.tgz", + "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", + "dev": true + }, + "node_modules/makeerror": { + "version": "1.0.12", + "resolved": "https://registry.npmmirror.com/makeerror/-/makeerror-1.0.12.tgz", + "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==", + "dev": true, + "dependencies": { + "tmpl": "1.0.5" + } + }, + "node_modules/map-cache": { + "version": "0.2.2", + "resolved": "https://registry.npmmirror.com/map-cache/-/map-cache-0.2.2.tgz", + "integrity": "sha512-8y/eV9QQZCiyn1SprXSrCmqJN0yNRATe+PO8ztwqrvrbdRLA3eYJF0yaR0YayLWkMbsQSKWS9N2gPcGEc4UsZg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/map-visit": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/map-visit/-/map-visit-1.0.0.tgz", + "integrity": "sha512-4y7uGv8bd2WdM9vpQsiQNo41Ln1NvhvDRuVt0k2JZQ+ezN2uaQes7lZeZ+QQUHOLQAtDaBJ+7wCbi+ab/KFs+w==", + "dependencies": { + "object-visit": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/marked": { + "version": "12.0.2", + "resolved": "https://registry.npmmirror.com/marked/-/marked-12.0.2.tgz", + "integrity": "sha512-qXUm7e/YKFoqFPYPa3Ukg9xlI5cyAtGmyEIzMfW//m6kXwCy2Ps9DYf5ioijFKQ8qyuscrHoY04iJGctu2Kg0Q==", + "bin": { + "marked": "bin/marked.js" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/marked-highlight": { + "version": "2.1.1", + "resolved": "https://registry.npmmirror.com/marked-highlight/-/marked-highlight-2.1.1.tgz", + "integrity": "sha512-ktdqwtBne8rim5mb+vvZ9FzElGFb+CHCgkx/g6DSzTjaSrVnxsJdSzB5YgCkknFrcOW+viocM1lGyIjC0oa3fg==", + "peerDependencies": { + "marked": ">=4 <13" + } + }, + "node_modules/md5.js": { + "version": "1.3.5", + "resolved": "https://registry.npmmirror.com/md5.js/-/md5.js-1.3.5.tgz", + "integrity": "sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==", + "peer": true, + "dependencies": { + "hash-base": "^3.0.0", + "inherits": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + "node_modules/mdn-data": { + "version": "2.0.30", + "resolved": "https://registry.npmmirror.com/mdn-data/-/mdn-data-2.0.30.tgz", + "integrity": "sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==", + "dev": true + }, + 
"node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmmirror.com/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/memfs": { + "version": "3.5.3", + "resolved": "https://registry.npmmirror.com/memfs/-/memfs-3.5.3.tgz", + "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==", + "dev": true, + "dependencies": { + "fs-monkey": "^1.0.4" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/memoize-one": { + "version": "6.0.0", + "resolved": "https://registry.npmmirror.com/memoize-one/-/memoize-one-6.0.0.tgz", + "integrity": "sha512-rkpe71W0N0c0Xz6QD0eJETuWAJGnJ9afsl1srmwPrI+yBCkge5EycXXbYRyvL29zZVUWQCY7InPRCv3GDXuZNw==" + }, + "node_modules/memory-fs": { + "version": "0.4.1", + "resolved": "https://registry.npmmirror.com/memory-fs/-/memory-fs-0.4.1.tgz", + "integrity": "sha512-cda4JKCxReDXFXRqOHPQscuIYg1PvxbE2S2GP45rnwfEK+vZaXC8C1OFvdHIbgw0DLzowXGVoxLaAmlgRy14GQ==", + "peer": true, + "dependencies": { + "errno": "^0.1.3", + "readable-stream": "^2.0.1" + } + }, + "node_modules/merge-descriptors": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz", + "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + }, + "node_modules/merge-source-map": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/merge-source-map/-/merge-source-map-1.1.0.tgz", + "integrity": "sha512-Qkcp7P2ygktpMPh2mCQZaf3jhN6D3Z/qVZHSdWvQ+2Ef5HgRAPBO57A77+ENm0CPx2+1Ce/MYKi3ymqdfuqibw==", + "dev": true, + "dependencies": { + "source-map": "^0.6.1" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmmirror.com/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": "https://registry.npmmirror.com/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/micromatch": { + "version": "4.0.5", + "resolved": "https://registry.npmmirror.com/micromatch/-/micromatch-4.0.5.tgz", + "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "dependencies": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/miller-rabin": { + "version": "4.0.1", + "resolved": "https://registry.npmmirror.com/miller-rabin/-/miller-rabin-4.0.1.tgz", + "integrity": "sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==", + "peer": true, + "dependencies": { + "bn.js": "^4.0.0", + "brorand": "^1.0.1" + }, + "bin": { + "miller-rabin": "bin/miller-rabin" + } + }, + "node_modules/miller-rabin/node_modules/bn.js": { + "version": "4.12.0", + "resolved": 
"https://registry.npmmirror.com/bn.js/-/bn.js-4.12.0.tgz", + "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==", + "peer": true + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmmirror.com/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmmirror.com/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmmirror.com/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/mimic-response": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/mimic-response/-/mimic-response-1.0.1.tgz", + "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/mini-css-extract-plugin": { + "version": "2.9.0", + "resolved": "https://registry.npmmirror.com/mini-css-extract-plugin/-/mini-css-extract-plugin-2.9.0.tgz", + "integrity": "sha512-Zs1YsZVfemekSZG+44vBsYTLQORkPMwnlv+aehcxK/NLKC+EGhDB39/YePYYqx/sTk6NnYpuqikhSn7+JIevTA==", + "dev": true, + "dependencies": { + "schema-utils": "^4.0.0", + "tapable": "^2.2.1" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + } + }, + "node_modules/mini-css-extract-plugin/node_modules/ajv": { + "version": "8.13.0", + "resolved": "https://registry.npmmirror.com/ajv/-/ajv-8.13.0.tgz", + "integrity": "sha512-PRA911Blj99jR5RMeTunVbNXMF6Lp4vZXnk5GQjcnUWUTsrXtekg/pnmFFI2u/I36Y/2bITGS30GZCXei6uNkA==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.3", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.4.1" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/mini-css-extract-plugin/node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/mini-css-extract-plugin/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "dev": true + }, + 
"node_modules/mini-css-extract-plugin/node_modules/schema-utils": { + "version": "4.2.0", + "resolved": "https://registry.npmmirror.com/schema-utils/-/schema-utils-4.2.0.tgz", + "integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/mini-css-extract-plugin/node_modules/tapable": { + "version": "2.2.1", + "resolved": "https://registry.npmmirror.com/tapable/-/tapable-2.2.1.tgz", + "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/minimalistic-assert": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", + "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" + }, + "node_modules/minimalistic-crypto-utils": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz", + "integrity": "sha512-JIYlbt6g8i5jKfJ3xz7rF0LXmv2TkDxBLUkiBeZ7bAx4GnnNMr8xFpGnOxn6GhTEHx3SjRrZEoU+j04prX1ktg==", + "peer": true + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmmirror.com/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmmirror.com/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/minipass": { + "version": "3.3.6", + "resolved": "https://registry.npmmirror.com/minipass/-/minipass-3.3.6.tgz", + "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "devOptional": true, + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/minipass/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "devOptional": true + }, + "node_modules/minizlib": { + "version": "2.1.2", + "resolved": "https://registry.npmmirror.com/minizlib/-/minizlib-2.1.2.tgz", + "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", + "optional": true, + "dependencies": { + "minipass": "^3.0.0", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/minizlib/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "optional": true + }, + "node_modules/mississippi": { + "version": "3.0.0", + "resolved": 
"https://registry.npmmirror.com/mississippi/-/mississippi-3.0.0.tgz", + "integrity": "sha512-x471SsVjUtBRtcvd4BzKE9kFC+/2TeWgKCgw0bZcw1b9l2X3QX5vCWgF+KaZaYm87Ss//rHnWryupDrgLvmSkA==", + "peer": true, + "dependencies": { + "concat-stream": "^1.5.0", + "duplexify": "^3.4.2", + "end-of-stream": "^1.1.0", + "flush-write-stream": "^1.0.0", + "from2": "^2.1.0", + "parallel-transform": "^1.1.0", + "pump": "^3.0.0", + "pumpify": "^1.3.3", + "stream-each": "^1.1.0", + "through2": "^2.0.0" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/mixin-deep": { + "version": "1.3.2", + "resolved": "https://registry.npmmirror.com/mixin-deep/-/mixin-deep-1.3.2.tgz", + "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==", + "dependencies": { + "for-in": "^1.0.2", + "is-extendable": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/mixin-deep/node_modules/is-extendable": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/is-extendable/-/is-extendable-1.0.1.tgz", + "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "dependencies": { + "is-plain-object": "^2.0.4" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/mkdirp": { + "version": "0.5.6", + "resolved": "https://registry.npmmirror.com/mkdirp/-/mkdirp-0.5.6.tgz", + "integrity": "sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==", + "dependencies": { + "minimist": "^1.2.6" + }, + "bin": { + "mkdirp": "bin/cmd.js" + } + }, + "node_modules/module-alias": { + "version": "2.2.3", + "resolved": "https://registry.npmmirror.com/module-alias/-/module-alias-2.2.3.tgz", + "integrity": "sha512-23g5BFj4zdQL/b6tor7Ji+QY4pEfNH784BMslY9Qb0UnJWRAt+lQGLYmRaM0KDBwIG23ffEBELhZDP2rhi9f/Q==", + "dev": true + }, + "node_modules/move-concurrently": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/move-concurrently/-/move-concurrently-1.0.1.tgz", + "integrity": "sha512-hdrFxZOycD/g6A6SoI2bB5NA/5NEqD0569+S47WZhPvm46sD50ZHdYaFmnua5lndde9rCHGjmfK7Z8BuCt/PcQ==", + "peer": true, + "dependencies": { + "aproba": "^1.1.1", + "copy-concurrently": "^1.0.0", + "fs-write-stream-atomic": "^1.0.8", + "mkdirp": "^0.5.1", + "rimraf": "^2.5.4", + "run-queue": "^1.0.3" + } + }, + "node_modules/move-concurrently/node_modules/rimraf": { + "version": "2.7.1", + "resolved": "https://registry.npmmirror.com/rimraf/-/rimraf-2.7.1.tgz", + "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", + "peer": true, + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + } + }, + "node_modules/mrmime": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/mrmime/-/mrmime-2.0.0.tgz", + "integrity": "sha512-eu38+hdgojoyq63s+yTpN4XMBdt5l8HhMhc4VKLO9KM5caLIBvUm4thi7fFaxyTmCKeNnXZ5pAlBwCUnhA09uw==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmmirror.com/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/multicast-dns": { + "version": "7.2.5", + "resolved": "https://registry.npmmirror.com/multicast-dns/-/multicast-dns-7.2.5.tgz", + "integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==", + "dev": true, + "dependencies": { + 
"dns-packet": "^5.2.2", + "thunky": "^1.0.2" + }, + "bin": { + "multicast-dns": "cli.js" + } + }, + "node_modules/mute-stream": { + "version": "0.0.8", + "resolved": "https://registry.npmmirror.com/mute-stream/-/mute-stream-0.0.8.tgz", + "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==" + }, + "node_modules/mz": { + "version": "2.7.0", + "resolved": "https://registry.npmmirror.com/mz/-/mz-2.7.0.tgz", + "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", + "dev": true, + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, + "node_modules/nan": { + "version": "2.19.0", + "resolved": "https://registry.npmmirror.com/nan/-/nan-2.19.0.tgz", + "integrity": "sha512-nO1xXxfh/RWNxfd/XPfbIfFk5vgLsAxUR9y5O0cHMJu/AW9U95JLXqthYHjEp+8gQ5p96K9jUp8nbVOxCdRbtw==", + "optional": true + }, + "node_modules/nanoid": { + "version": "3.3.7", + "resolved": "https://registry.npmmirror.com/nanoid/-/nanoid-3.3.7.tgz", + "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/nanomatch": { + "version": "1.2.13", + "resolved": "https://registry.npmmirror.com/nanomatch/-/nanomatch-1.2.13.tgz", + "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==", + "dependencies": { + "arr-diff": "^4.0.0", + "array-unique": "^0.3.2", + "define-property": "^2.0.2", + "extend-shallow": "^3.0.2", + "fragment-cache": "^0.2.1", + "is-windows": "^1.0.2", + "kind-of": "^6.0.2", + "object.pick": "^1.3.0", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/nanomatch/node_modules/extend-shallow": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/extend-shallow/-/extend-shallow-3.0.2.tgz", + "integrity": "sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==", + "dependencies": { + "assign-symbols": "^1.0.0", + "is-extendable": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/nanomatch/node_modules/is-extendable": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/is-extendable/-/is-extendable-1.0.1.tgz", + "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "dependencies": { + "is-plain-object": "^2.0.4" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/nanomatch/node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmmirror.com/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/nanopop": { + "version": "2.4.2", + "resolved": "https://registry.npmmirror.com/nanopop/-/nanopop-2.4.2.tgz", + "integrity": "sha512-NzOgmMQ+elxxHeIha+OG/Pv3Oc3p4RU2aBhwWwAqDpXrdTbtRylbRLQztLy8dMMwfl6pclznBdfUhccEn9ZIzw==" + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmmirror.com/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": 
"sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true + }, + "node_modules/natural-compare-lite": { + "version": "1.4.0", + "resolved": "https://registry.npmmirror.com/natural-compare-lite/-/natural-compare-lite-1.4.0.tgz", + "integrity": "sha512-Tj+HTDSJJKaZnfiuw+iaF9skdPpTo2GtEly5JHnWV/hfv2Qj/9RKsGISQtLh2ox3l5EAGw487hnBee0sIJ6v2g==", + "dev": true + }, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmmirror.com/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmmirror.com/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" + }, + "node_modules/next-tick": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/next-tick/-/next-tick-1.1.0.tgz", + "integrity": "sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ==" + }, + "node_modules/nice-try": { + "version": "1.0.5", + "resolved": "https://registry.npmmirror.com/nice-try/-/nice-try-1.0.5.tgz", + "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==" + }, + "node_modules/no-case": { + "version": "3.0.4", + "resolved": "https://registry.npmmirror.com/no-case/-/no-case-3.0.4.tgz", + "integrity": "sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==", + "dev": true, + "dependencies": { + "lower-case": "^2.0.2", + "tslib": "^2.0.3" + } + }, + "node_modules/node-abort-controller": { + "version": "3.1.1", + "resolved": "https://registry.npmmirror.com/node-abort-controller/-/node-abort-controller-3.1.1.tgz", + "integrity": "sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==" + }, + "node_modules/node-dir": { + "version": "0.1.17", + "resolved": "https://registry.npmmirror.com/node-dir/-/node-dir-0.1.17.tgz", + "integrity": "sha512-tmPX422rYgofd4epzrNoOXiE8XFZYOcCq1vD7MAXCDO+O+zndlA2ztdKKMa+EeuBG5tHETpr4ml4RGgpqDCCAg==", + "dependencies": { + "minimatch": "^3.0.2" + }, + "engines": { + "node": ">= 0.10.5" + } + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmmirror.com/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/node-fetch/node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmmirror.com/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "node_modules/node-fetch/node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "node_modules/node-fetch/node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": 
"https://registry.npmmirror.com/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/node-forge": { + "version": "1.3.1", + "resolved": "https://registry.npmmirror.com/node-forge/-/node-forge-1.3.1.tgz", + "integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==", + "dev": true, + "engines": { + "node": ">= 6.13.0" + } + }, + "node_modules/node-gyp-build": { + "version": "4.8.1", + "resolved": "https://registry.npmmirror.com/node-gyp-build/-/node-gyp-build-4.8.1.tgz", + "integrity": "sha512-OSs33Z9yWr148JZcbZd5WiAXhh/n9z8TxQcdMhIOlpN9AhWpLfvVFO73+m77bBABQMaY9XSvIa+qk0jlI7Gcaw==", + "bin": { + "node-gyp-build": "bin.js", + "node-gyp-build-optional": "optional.js", + "node-gyp-build-test": "build-test.js" + } + }, + "node_modules/node-int64": { + "version": "0.4.0", + "resolved": "https://registry.npmmirror.com/node-int64/-/node-int64-0.4.0.tgz", + "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", + "dev": true + }, + "node_modules/node-libs-browser": { + "version": "2.2.1", + "resolved": "https://registry.npmmirror.com/node-libs-browser/-/node-libs-browser-2.2.1.tgz", + "integrity": "sha512-h/zcD8H9kaDZ9ALUWwlBUDo6TKF8a7qBSCSEGfjTVIYeqsioSKaAX+BN7NgiMGp6iSIXZ3PxgCu8KS3b71YK5Q==", + "peer": true, + "dependencies": { + "assert": "^1.1.1", + "browserify-zlib": "^0.2.0", + "buffer": "^4.3.0", + "console-browserify": "^1.1.0", + "constants-browserify": "^1.0.0", + "crypto-browserify": "^3.11.0", + "domain-browser": "^1.1.1", + "events": "^3.0.0", + "https-browserify": "^1.0.0", + "os-browserify": "^0.3.0", + "path-browserify": "0.0.1", + "process": "^0.11.10", + "punycode": "^1.2.4", + "querystring-es3": "^0.2.0", + "readable-stream": "^2.3.3", + "stream-browserify": "^2.0.1", + "stream-http": "^2.7.2", + "string_decoder": "^1.0.0", + "timers-browserify": "^2.0.4", + "tty-browserify": "0.0.0", + "url": "^0.11.0", + "util": "^0.11.0", + "vm-browserify": "^1.0.1" + } + }, + "node_modules/node-libs-browser/node_modules/buffer": { + "version": "4.9.2", + "resolved": "https://registry.npmmirror.com/buffer/-/buffer-4.9.2.tgz", + "integrity": "sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==", + "peer": true, + "dependencies": { + "base64-js": "^1.0.2", + "ieee754": "^1.1.4", + "isarray": "^1.0.0" + } + }, + "node_modules/node-libs-browser/node_modules/inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmmirror.com/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==", + "peer": true + }, + "node_modules/node-libs-browser/node_modules/punycode": { + "version": "1.4.1", + "resolved": "https://registry.npmmirror.com/punycode/-/punycode-1.4.1.tgz", + "integrity": "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ==", + "peer": true + }, + "node_modules/node-libs-browser/node_modules/util": { + "version": "0.11.1", + "resolved": "https://registry.npmmirror.com/util/-/util-0.11.1.tgz", + "integrity": "sha512-HShAsny+zS2TZfaXxD9tYj4HQGlBezXZMZuM/S5PKLLoZkShZiGk9o5CzukI1LVHZvjdvZ2Sj1aW/Ndn2NB/HQ==", + "peer": true, + "dependencies": { + "inherits": "2.0.3" + } + }, + "node_modules/node-notifier": { + 
"version": "10.0.1", + "resolved": "https://registry.npmmirror.com/node-notifier/-/node-notifier-10.0.1.tgz", + "integrity": "sha512-YX7TSyDukOZ0g+gmzjB6abKu+hTGvO8+8+gIFDsRCU2t8fLV/P2unmt+LGFaIa4y64aX98Qksa97rgz4vMNeLQ==", + "dependencies": { + "growly": "^1.3.0", + "is-wsl": "^2.2.0", + "semver": "^7.3.5", + "shellwords": "^0.1.1", + "uuid": "^8.3.2", + "which": "^2.0.2" + } + }, + "node_modules/node-notifier/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/node-notifier/node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmmirror.com/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/node-releases": { + "version": "2.0.14", + "resolved": "https://registry.npmmirror.com/node-releases/-/node-releases-2.0.14.tgz", + "integrity": "sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==" + }, + "node_modules/nopt": { + "version": "7.2.1", + "resolved": "https://registry.npmmirror.com/nopt/-/nopt-7.2.1.tgz", + "integrity": "sha512-taM24ViiimT/XntxbPyJQzCG+p4EKOpgD3mxFwW38mGjVUrfERQOeY4EDHjdnptttfHuHQXFx+lTP08Q+mLa/w==", + "dev": true, + "dependencies": { + "abbrev": "^2.0.0" + }, + "bin": { + "nopt": "bin/nopt.js" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/normalize-package-data": { + "version": "2.5.0", + "resolved": "https://registry.npmmirror.com/normalize-package-data/-/normalize-package-data-2.5.0.tgz", + "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", + "dependencies": { + "hosted-git-info": "^2.1.4", + "resolve": "^1.10.0", + "semver": "2 || 3 || 4 || 5", + "validate-npm-package-license": "^3.0.1" + } + }, + "node_modules/normalize-package-data/node_modules/semver": { + "version": "5.7.2", + "resolved": "https://registry.npmmirror.com/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "devOptional": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-range": { + "version": "0.1.2", + "resolved": "https://registry.npmmirror.com/normalize-range/-/normalize-range-0.1.2.tgz", + "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-url": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/normalize-url/-/normalize-url-2.0.1.tgz", + "integrity": "sha512-D6MUW4K/VzoJ4rJ01JFKxDrtY1v9wrgzCX5f2qj/lzH1m/lW6MhUZFKerVsnyjOhOsYzI9Kqqak+10l4LvLpMw==", + "dependencies": { + "prepend-http": "^2.0.0", + "query-string": "^5.0.1", + "sort-keys": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + 
"node_modules/normalize-wheel-es": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/normalize-wheel-es/-/normalize-wheel-es-1.2.0.tgz", + "integrity": "sha512-Wj7+EJQ8mSuXr2iWfnujrimU35R2W4FAErEyTmJoJ7ucwTn2hOUSsRehMb5RSYkxXGTM7Y9QpvPmp++w5ftoJw==" + }, + "node_modules/npm-conf": { + "version": "1.1.3", + "resolved": "https://registry.npmmirror.com/npm-conf/-/npm-conf-1.1.3.tgz", + "integrity": "sha512-Yic4bZHJOt9RCFbRP3GgpqhScOY4HH3V2P8yBj6CeYq118Qr+BLXqT2JvpJ00mryLESpgOxf5XlFv4ZjXxLScw==", + "dependencies": { + "config-chain": "^1.1.11", + "pify": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm-run-path": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/npm-run-path/-/npm-run-path-2.0.2.tgz", + "integrity": "sha512-lJxZYlT4DW/bRUtFh1MQIWqmLwQfAxnqWG4HhEdjMlkrJYnJn0Jrr2u3mgxqaWsdiBc76TYkTG/mhrnYTuzfHw==", + "dependencies": { + "path-key": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm-run-path/node_modules/path-key": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/path-key/-/path-key-2.0.1.tgz", + "integrity": "sha512-fEHGKCSmUSDPv4uoj8AlD+joPlq3peND+HRYyxFz4KPw4z926S/b8rIuFs2FYJg3BwsxJf6A9/3eIdLaYC+9Dw==", + "engines": { + "node": ">=4" + } + }, + "node_modules/npmlog": { + "version": "5.0.1", + "resolved": "https://registry.npmmirror.com/npmlog/-/npmlog-5.0.1.tgz", + "integrity": "sha512-AqZtDUWOMKs1G/8lwylVjrdYgqA4d9nu8hc+0gzRxlDb1I10+FHBGMXs6aiQHFdCUUlqH99MUMuLfzWDNDtfxw==", + "deprecated": "This package is no longer supported.", + "optional": true, + "dependencies": { + "are-we-there-yet": "^2.0.0", + "console-control-strings": "^1.1.0", + "gauge": "^3.0.0", + "set-blocking": "^2.0.0" + } + }, + "node_modules/nth-check": { + "version": "2.1.1", + "resolved": "https://registry.npmmirror.com/nth-check/-/nth-check-2.1.1.tgz", + "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", + "dev": true, + "dependencies": { + "boolbase": "^1.0.0" + }, + "funding": { + "url": "https://github.com/fb55/nth-check?sponsor=1" + } + }, + "node_modules/nwsapi": { + "version": "2.2.10", + "resolved": "https://registry.npmmirror.com/nwsapi/-/nwsapi-2.2.10.tgz", + "integrity": "sha512-QK0sRs7MKv0tKe1+5uZIQk/C8XGza4DAnztJG8iD+TpJIORARrCxczA738awHrZoHeTjSSoHqao2teO0dC/gFQ==", + "dev": true + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmmirror.com/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-copy": { + "version": "0.1.0", + "resolved": "https://registry.npmmirror.com/object-copy/-/object-copy-0.1.0.tgz", + "integrity": "sha512-79LYn6VAb63zgtmAteVOWo9Vdj71ZVBy3Pbse+VqxDpEP83XuujMrGqHIwAXJ5I/aM0zU7dIyIAhifVTPrNItQ==", + "dependencies": { + "copy-descriptor": "^0.1.0", + "define-property": "^0.2.5", + "kind-of": "^3.0.3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-copy/node_modules/define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmmirror.com/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha512-Rr7ADjQZenceVOAKop6ALkkRAmH1A4Gx9hV/7ZujPUN2rkATqFO0JZLZInbAjpZYoJ1gUx8MRMQVkYemcbMSTA==", + "dependencies": { + "is-descriptor": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-copy/node_modules/is-descriptor": { + 
"version": "0.1.7", + "resolved": "https://registry.npmmirror.com/is-descriptor/-/is-descriptor-0.1.7.tgz", + "integrity": "sha512-C3grZTvObeN1xud4cRWl366OMXZTj0+HGyk4hvfpx4ZHt1Pb60ANSXqCK7pdOTeUQpRzECBSTphqvD7U+l22Eg==", + "dependencies": { + "is-accessor-descriptor": "^1.0.1", + "is-data-descriptor": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object-inspect": { + "version": "1.13.1", + "resolved": "https://registry.npmmirror.com/object-inspect/-/object-inspect-1.13.1.tgz", + "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmmirror.com/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "peer": true, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object-visit": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/object-visit/-/object-visit-1.0.1.tgz", + "integrity": "sha512-GBaMwwAVK9qbQN3Scdo0OyvgPW7l3lnaVMj84uTOZlswkX0KpF6fyDBJhtTthf7pymztoN36/KEr1DyhF96zEA==", + "dependencies": { + "isobject": "^3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object.assign": { + "version": "4.1.5", + "resolved": "https://registry.npmmirror.com/object.assign/-/object.assign-4.1.5.tgz", + "integrity": "sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==", + "peer": true, + "dependencies": { + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", + "has-symbols": "^1.0.3", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.pick": { + "version": "1.3.0", + "resolved": "https://registry.npmmirror.com/object.pick/-/object.pick-1.3.0.tgz", + "integrity": "sha512-tqa/UMy/CCoYmj+H5qc07qvSL9dqcs/WZENZ1JbtWBlATP+iVOe778gE6MSijnyCnORzDuX6hU+LA4SZ09YjFQ==", + "dependencies": { + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/obuf": { + "version": "1.1.2", + "resolved": "https://registry.npmmirror.com/obuf/-/obuf-1.1.2.tgz", + "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==", + "dev": true + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmmirror.com/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/on-headers": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/on-headers/-/on-headers-1.0.2.tgz", + "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", + "dev": true, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmmirror.com/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmmirror.com/onetime/-/onetime-5.1.2.tgz", + "integrity": 
"sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/open": { + "version": "8.4.2", + "resolved": "https://registry.npmmirror.com/open/-/open-8.4.2.tgz", + "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==", + "dependencies": { + "define-lazy-prop": "^2.0.0", + "is-docker": "^2.1.1", + "is-wsl": "^2.2.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/opener": { + "version": "1.5.2", + "resolved": "https://registry.npmmirror.com/opener/-/opener-1.5.2.tgz", + "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==", + "dev": true, + "bin": { + "opener": "bin/opener-bin.js" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmmirror.com/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/ora": { + "version": "5.4.1", + "resolved": "https://registry.npmmirror.com/ora/-/ora-5.4.1.tgz", + "integrity": "sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ==", + "dependencies": { + "bl": "^4.1.0", + "chalk": "^4.1.0", + "cli-cursor": "^3.1.0", + "cli-spinners": "^2.5.0", + "is-interactive": "^1.0.0", + "is-unicode-supported": "^0.1.0", + "log-symbols": "^4.1.0", + "strip-ansi": "^6.0.0", + "wcwidth": "^1.0.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ora/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/ora/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/ora/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/ora/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": 
"sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/ora/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/ora/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/os-browserify": { + "version": "0.3.0", + "resolved": "https://registry.npmmirror.com/os-browserify/-/os-browserify-0.3.0.tgz", + "integrity": "sha512-gjcpUc3clBf9+210TRaDWbf+rZZZEshZ+DlXMRCeAjp0xhTrnQsKHypIy1J3d5hKdUzj69t708EHtU8P6bUn0A==", + "peer": true + }, + "node_modules/os-tmpdir": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz", + "integrity": "sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/p-cancelable": { + "version": "0.4.1", + "resolved": "https://registry.npmmirror.com/p-cancelable/-/p-cancelable-0.4.1.tgz", + "integrity": "sha512-HNa1A8LvB1kie7cERyy21VNeHb2CWJJYqyyC2o3klWFfMGlFmWv2Z7sFgZH8ZiaYL95ydToKTFVXgMV/Os0bBQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/p-event": { + "version": "2.3.1", + "resolved": "https://registry.npmmirror.com/p-event/-/p-event-2.3.1.tgz", + "integrity": "sha512-NQCqOFhbpVTMX4qMe8PF8lbGtzZ+LCiN7pcNrb/413Na7+TRoe1xkKUzuWa/YEJdGQ0FvKtj35EEbDoVPO2kbA==", + "dependencies": { + "p-timeout": "^2.0.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/p-finally": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/p-finally/-/p-finally-1.0.0.tgz", + "integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==", + "engines": { + "node": ">=4" + } + }, + "node_modules/p-is-promise": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/p-is-promise/-/p-is-promise-1.1.0.tgz", + "integrity": "sha512-zL7VE4JVS2IFSkR2GQKDSPEVxkoH43/p7oEnwpdCndKYJO0HVeRB7fA8TJwuLOTBREtK0ea8eHaxdwcpob5dmg==", + "engines": { + "node": ">=4" + } + }, + "node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmmirror.com/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmmirror.com/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/p-retry": { + "version": "4.6.2", + "resolved": "https://registry.npmmirror.com/p-retry/-/p-retry-4.6.2.tgz", + "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", + "dev": true, + "dependencies": { + 
"@types/retry": "0.12.0", + "retry": "^0.13.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/p-timeout": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/p-timeout/-/p-timeout-2.0.1.tgz", + "integrity": "sha512-88em58dDVB/KzPEx1X0N3LwFfYZPyDc4B6eF38M1rk9VTZMbxXXgjugz8mmwpS9Ox4BDZ+t6t3QP5+/gazweIA==", + "dependencies": { + "p-finally": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmmirror.com/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/pako": { + "version": "1.0.11", + "resolved": "https://registry.npmmirror.com/pako/-/pako-1.0.11.tgz", + "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==" + }, + "node_modules/parallel-transform": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/parallel-transform/-/parallel-transform-1.2.0.tgz", + "integrity": "sha512-P2vSmIu38uIlvdcU7fDkyrxj33gTUy/ABO5ZUbGowxNCopBq/OoD42bP4UmMrJoPyk4Uqf0mu3mtWBhHCZD8yg==", + "peer": true, + "dependencies": { + "cyclist": "^1.0.1", + "inherits": "^2.0.3", + "readable-stream": "^2.1.5" + } + }, + "node_modules/param-case": { + "version": "3.0.4", + "resolved": "https://registry.npmmirror.com/param-case/-/param-case-3.0.4.tgz", + "integrity": "sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A==", + "dev": true, + "dependencies": { + "dot-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-asn1": { + "version": "5.1.7", + "resolved": "https://registry.npmmirror.com/parse-asn1/-/parse-asn1-5.1.7.tgz", + "integrity": "sha512-CTM5kuWR3sx9IFamcl5ErfPl6ea/N8IYwiJ+vpeB2g+1iknv7zBl5uPwbMbRVznRVbrNY6lGuDoE5b30grmbqg==", + "peer": true, + "dependencies": { + "asn1.js": "^4.10.1", + "browserify-aes": "^1.2.0", + "evp_bytestokey": "^1.0.3", + "hash-base": "~3.0", + "pbkdf2": "^3.1.2", + "safe-buffer": "^5.2.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/parse-git-config": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/parse-git-config/-/parse-git-config-3.0.0.tgz", + "integrity": "sha512-wXoQGL1D+2COYWCD35/xbiKma1Z15xvZL8cI25wvxzled58V51SJM04Urt/uznS900iQor7QO04SgdfT/XlbuA==", + "dependencies": { + "git-config-path": "^2.0.0", + "ini": "^1.3.5" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/parse-git-config/node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmmirror.com/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==" + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmmirror.com/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + 
"lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parse5": { + "version": "5.1.1", + "resolved": "https://registry.npmmirror.com/parse5/-/parse5-5.1.1.tgz", + "integrity": "sha512-ugq4DFI0Ptb+WWjAdOK16+u/nHfiIrcE+sh8kZMaM0WllQKLI9rOUq6c2b7cwPkXdzfQESqvoqK6ug7U/Yyzug==", + "dev": true + }, + "node_modules/parse5-htmlparser2-tree-adapter": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz", + "integrity": "sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA==", + "dev": true, + "dependencies": { + "parse5": "^6.0.1" + } + }, + "node_modules/parse5-htmlparser2-tree-adapter/node_modules/parse5": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/parse5/-/parse5-6.0.1.tgz", + "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==", + "dev": true + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmmirror.com/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/pascal-case": { + "version": "3.1.2", + "resolved": "https://registry.npmmirror.com/pascal-case/-/pascal-case-3.1.2.tgz", + "integrity": "sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==", + "dev": true, + "dependencies": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/pascalcase": { + "version": "0.1.1", + "resolved": "https://registry.npmmirror.com/pascalcase/-/pascalcase-0.1.1.tgz", + "integrity": "sha512-XHXfu/yOQRy9vYOtUDVMN60OEJjW013GoObG1o+xwQTpB9eYJX/BjXMsdW13ZDPruFhYYn0AG22w0xgQMwl3Nw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-browserify": { + "version": "0.0.1", + "resolved": "https://registry.npmmirror.com/path-browserify/-/path-browserify-0.0.1.tgz", + "integrity": "sha512-BapA40NHICOS+USX9SN4tyhq+A2RrN/Ws5F0Z5aMHDp98Fl86lX8Oti8B7uN93L4Ifv4fHOEA+pQw87gmMO/lQ==", + "peer": true + }, + "node_modules/path-dirname": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/path-dirname/-/path-dirname-1.0.2.tgz", + "integrity": "sha512-ALzNPpyNq9AqXMBjeymIjFDAkAFH06mHJH/cSBHAgU0s4vfpBn6b2nf8tiRLvagKD8RbTpq2FKTBg7cl9l3c7Q==", + "optional": true, + "peer": true + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmmirror.com/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": 
"https://registry.npmmirror.com/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" + }, + "node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmmirror.com/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "dev": true, + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-scurry/node_modules/lru-cache": { + "version": "10.2.2", + "resolved": "https://registry.npmmirror.com/lru-cache/-/lru-cache-10.2.2.tgz", + "integrity": "sha512-9hp3Vp2/hFQUiIwKo8XCeFVnrg8Pk3TYNPIR7tJADKi5YfcF7vEaK7avFHTlSy3kOKYaJQaalfEo6YuXdceBOQ==", + "dev": true, + "engines": { + "node": "14 || >=16.14" + } + }, + "node_modules/path-scurry/node_modules/minipass": { + "version": "7.1.1", + "resolved": "https://registry.npmmirror.com/minipass/-/minipass-7.1.1.tgz", + "integrity": "sha512-UZ7eQ+h8ywIRAW1hIEl2AqdwzJucU/Kp59+8kkZeSvafXhZjul247BvIJjEVFVeON6d7lM46XX1HXCduKAS8VA==", + "dev": true, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/path-to-regexp": { + "version": "0.1.7", + "resolved": "https://registry.npmmirror.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz", + "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/path2d-polyfill": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/path2d-polyfill/-/path2d-polyfill-2.0.1.tgz", + "integrity": "sha512-ad/3bsalbbWhmBo0D6FZ4RNMwsLsPpL6gnvhuSaU5Vm7b06Kr5ubSltQQ0T7YKsiJQO+g22zJ4dJKNTXIyOXtA==", + "engines": { + "node": ">=8" + } + }, + "node_modules/pbkdf2": { + "version": "3.1.2", + "resolved": "https://registry.npmmirror.com/pbkdf2/-/pbkdf2-3.1.2.tgz", + "integrity": "sha512-iuh7L6jA7JEGu2WxDwtQP1ddOpaJNC4KlDEFfdQajSGgGPNi4OyDc2R7QnbY2bR9QjBVGwgvTdNJZoE7RaxUMA==", + "peer": true, + "dependencies": { + "create-hash": "^1.1.2", + "create-hmac": "^1.1.4", + "ripemd160": "^2.0.1", + "safe-buffer": "^5.0.1", + "sha.js": "^2.4.8" + }, + "engines": { + "node": ">=0.12" + } + }, + "node_modules/pdf-lib": { + "version": "1.17.1", + "resolved": "https://registry.npmmirror.com/pdf-lib/-/pdf-lib-1.17.1.tgz", + "integrity": "sha512-V/mpyJAoTsN4cnP31vc0wfNA1+p20evqqnap0KLoRUN0Yk/p3wN52DOEsL4oBFcLdb76hlpKPtzJIgo67j/XLw==", + "dependencies": { + "@pdf-lib/standard-fonts": "^1.0.0", + "@pdf-lib/upng": "^1.0.1", + "pako": "^1.0.11", + "tslib": "^1.11.1" + } + }, + "node_modules/pdf-lib/node_modules/tslib": { + "version": "1.14.1", + "resolved": "https://registry.npmmirror.com/tslib/-/tslib-1.14.1.tgz", + "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==" + }, + "node_modules/pdfjs-dist": { + "version": "3.5.141", + "resolved": "https://registry.npmmirror.com/pdfjs-dist/-/pdfjs-dist-3.5.141.tgz", + "integrity": "sha512-lYIvyi5grtYOIatsfCifIKwxHeAJ8eHyP22DTdvY4pm0yWVSFQnMafpgCPSw8gaNRDDdcHnBVOkqMsyK8SRxZg==", + 
"dependencies": { + "path2d-polyfill": "^2.0.1", + "web-streams-polyfill": "^3.2.1" + }, + "engines": { + "node": ">=16" + }, + "optionalDependencies": { + "canvas": "^2.11.0" + } + }, + "node_modules/pdfobject": { + "version": "2.3.0", + "resolved": "https://registry.npmmirror.com/pdfobject/-/pdfobject-2.3.0.tgz", + "integrity": "sha512-w/9pXDXTDs3IDmOri/w8lM/w6LHR0/F4fcBLLzH+4csSoyshQ5su0TE7k0FLHZO7aOjVLDGecqd1M89+PVpVAA==" + }, + "node_modules/pend": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/pend/-/pend-1.2.0.tgz", + "integrity": "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==" + }, + "node_modules/picocolors": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/picocolors/-/picocolors-1.0.1.tgz", + "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmmirror.com/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pid-port": { + "version": "0.1.1", + "resolved": "https://registry.npmmirror.com/pid-port/-/pid-port-0.1.1.tgz", + "integrity": "sha512-boqPJtSgZC6KOgXKNPC+/XR3xwVtpOtaLa7JLcdf8jfVe0ZM2TwllBXxxLUO8GQbOLJ4/hEtf2+L1QCKbaoHUg==", + "dependencies": { + "execa": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pid-port/node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmmirror.com/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/pid-port/node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pid-port/node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pid-port/node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmmirror.com/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pify": { + "version": "3.0.0", + "resolved": 
"https://registry.npmmirror.com/pify/-/pify-3.0.0.tgz", + "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==", + "engines": { + "node": ">=4" + } + }, + "node_modules/pinkie": { + "version": "2.0.4", + "resolved": "https://registry.npmmirror.com/pinkie/-/pinkie-2.0.4.tgz", + "integrity": "sha512-MnUuEycAemtSaeFSjXKW/aroV7akBbY+Sv+RkyqFjgAe73F+MR0TBWKBRDkmfWq/HiFmdavfZ1G7h4SPZXaCSg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pinkie-promise": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz", + "integrity": "sha512-0Gni6D4UcLTbv9c57DfxDGdr41XfgUjqWZu492f0cIGr16zDU06BWP/RAEvOuo7CQ0CNjHaLlM59YJJFm3NWlw==", + "dependencies": { + "pinkie": "^2.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pirates": { + "version": "4.0.6", + "resolved": "https://registry.npmmirror.com/pirates/-/pirates-4.0.6.tgz", + "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==", + "engines": { + "node": ">= 6" + } + }, + "node_modules/pkg-dir": { + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/pkg-dir/-/pkg-dir-5.0.0.tgz", + "integrity": "sha512-NPE8TDbzl/3YQYY7CSS228s3g2ollTFnc+Qi3tqmqJp9Vg2ovUpixcJEo2HJScN2Ez+kEaal6y70c0ehqJBJeA==", + "dependencies": { + "find-up": "^5.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/pkg-dir/node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-dir/node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmmirror.com/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-dir/node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-dir/node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/portfinder": { + "version": "1.0.32", + "resolved": "https://registry.npmmirror.com/portfinder/-/portfinder-1.0.32.tgz", + "integrity": "sha512-on2ZJVVDXRADWE6jnQaX0ioEylzgBpQk8r55NE4wjXW1ZxO+BgDlY6DXwj20i0V8eB4SenDQ00WEaxfiIQPcxg==", + "dependencies": { + "async": "^2.6.4", + "debug": "^3.2.7", + "mkdirp": "^0.5.6" + }, + "engines": { + "node": 
">= 0.12.0" + } + }, + "node_modules/portfinder/node_modules/async": { + "version": "2.6.4", + "resolved": "https://registry.npmmirror.com/async/-/async-2.6.4.tgz", + "integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==", + "dependencies": { + "lodash": "^4.17.14" + } + }, + "node_modules/portfinder/node_modules/debug": { + "version": "3.2.7", + "resolved": "https://registry.npmmirror.com/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/posix-character-classes": { + "version": "0.1.1", + "resolved": "https://registry.npmmirror.com/posix-character-classes/-/posix-character-classes-0.1.1.tgz", + "integrity": "sha512-xTgYBc3fuo7Yt7JbiuFxSYGToMoz8fLoE6TC9Wx1P/u+LfeThMOAqmuyECnlBaaJb+u1m9hHiXUEtwW4OzfUJg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/possible-typed-array-names": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/possible-typed-array-names/-/possible-typed-array-names-1.0.0.tgz", + "integrity": "sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/postcss": { + "version": "8.4.38", + "resolved": "https://registry.npmmirror.com/postcss/-/postcss-8.4.38.tgz", + "integrity": "sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "nanoid": "^3.3.7", + "picocolors": "^1.0.0", + "source-map-js": "^1.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-calc": { + "version": "8.2.4", + "resolved": "https://registry.npmmirror.com/postcss-calc/-/postcss-calc-8.2.4.tgz", + "integrity": "sha512-SmWMSJmB8MRnnULldx0lQIyhSNvuDl9HfrZkaqqE/WHAhToYsAvDq+yAsA/kIyINDszOp3Rh0GFoNuH5Ypsm3Q==", + "dev": true, + "dependencies": { + "postcss-selector-parser": "^6.0.9", + "postcss-value-parser": "^4.2.0" + }, + "peerDependencies": { + "postcss": "^8.2.2" + } + }, + "node_modules/postcss-colormin": { + "version": "5.3.1", + "resolved": "https://registry.npmmirror.com/postcss-colormin/-/postcss-colormin-5.3.1.tgz", + "integrity": "sha512-UsWQG0AqTFQmpBegeLLc1+c3jIqBNB0zlDGRWR+dQ3pRKJL1oeMzyqmH3o2PIfn9MBdNrVPWhDbT769LxCTLJQ==", + "dev": true, + "dependencies": { + "browserslist": "^4.21.4", + "caniuse-api": "^3.0.0", + "colord": "^2.9.1", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-convert-values": { + "version": "5.1.3", + "resolved": "https://registry.npmmirror.com/postcss-convert-values/-/postcss-convert-values-5.1.3.tgz", + "integrity": "sha512-82pC1xkJZtcJEfiLw6UXnXVXScgtBrjlO5CBmuDQc+dlb88ZYheFsjTn40+zBVi3DkfF7iezO0nJUPLcJK3pvA==", + "dev": true, + "dependencies": { + "browserslist": "^4.21.4", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-discard-comments": { + "version": "5.1.2", + "resolved": 
"https://registry.npmmirror.com/postcss-discard-comments/-/postcss-discard-comments-5.1.2.tgz", + "integrity": "sha512-+L8208OVbHVF2UQf1iDmRcbdjJkuBF6IS29yBDSiWUIzpYaAhtNl6JYnYm12FnkeCwQqF5LeklOu6rAqgfBZqQ==", + "dev": true, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-discard-duplicates": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/postcss-discard-duplicates/-/postcss-discard-duplicates-5.1.0.tgz", + "integrity": "sha512-zmX3IoSI2aoenxHV6C7plngHWWhUOV3sP1T8y2ifzxzbtnuhk1EdPwm0S1bIUNaJ2eNbWeGLEwzw8huPD67aQw==", + "dev": true, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-discard-empty": { + "version": "5.1.1", + "resolved": "https://registry.npmmirror.com/postcss-discard-empty/-/postcss-discard-empty-5.1.1.tgz", + "integrity": "sha512-zPz4WljiSuLWsI0ir4Mcnr4qQQ5e1Ukc3i7UfE2XcrwKK2LIPIqE5jxMRxO6GbI3cv//ztXDsXwEWT3BHOGh3A==", + "dev": true, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-discard-overridden": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/postcss-discard-overridden/-/postcss-discard-overridden-5.1.0.tgz", + "integrity": "sha512-21nOL7RqWR1kasIVdKs8HNqQJhFxLsyRfAnUDm4Fe4t4mCWL9OJiHvlHPjcd8zc5Myu89b/7wZDnOSjFgeWRtw==", + "dev": true, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-loader": { + "version": "6.2.1", + "resolved": "https://registry.npmmirror.com/postcss-loader/-/postcss-loader-6.2.1.tgz", + "integrity": "sha512-WbbYpmAaKcux/P66bZ40bpWsBucjx/TTgVVzRZ9yUO8yQfVBlameJ0ZGVaPfH64hNSBh63a+ICP5nqOpBA0w+Q==", + "dev": true, + "dependencies": { + "cosmiconfig": "^7.0.0", + "klona": "^2.0.5", + "semver": "^7.3.5" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "postcss": "^7.0.0 || ^8.0.1", + "webpack": "^5.0.0" + } + }, + "node_modules/postcss-loader/node_modules/cosmiconfig": { + "version": "7.1.0", + "resolved": "https://registry.npmmirror.com/cosmiconfig/-/cosmiconfig-7.1.0.tgz", + "integrity": "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==", + "dev": true, + "dependencies": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/postcss-loader/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/postcss-merge-longhand": { + "version": "5.1.7", + "resolved": "https://registry.npmmirror.com/postcss-merge-longhand/-/postcss-merge-longhand-5.1.7.tgz", + "integrity": "sha512-YCI9gZB+PLNskrK0BB3/2OzPnGhPkBEwmwhfYk1ilBHYVAZB7/tkTHFBAnCrvBBOmeYyMYw3DMjT55SyxMBzjQ==", + "dev": true, + "dependencies": { + "postcss-value-parser": "^4.2.0", + "stylehacks": "^5.1.1" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + 
"node_modules/postcss-merge-rules": { + "version": "5.1.4", + "resolved": "https://registry.npmmirror.com/postcss-merge-rules/-/postcss-merge-rules-5.1.4.tgz", + "integrity": "sha512-0R2IuYpgU93y9lhVbO/OylTtKMVcHb67zjWIfCiKR9rWL3GUk1677LAqD/BcHizukdZEjT8Ru3oHRoAYoJy44g==", + "dev": true, + "dependencies": { + "browserslist": "^4.21.4", + "caniuse-api": "^3.0.0", + "cssnano-utils": "^3.1.0", + "postcss-selector-parser": "^6.0.5" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-minify-font-values": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/postcss-minify-font-values/-/postcss-minify-font-values-5.1.0.tgz", + "integrity": "sha512-el3mYTgx13ZAPPirSVsHqFzl+BBBDrXvbySvPGFnQcTI4iNslrPaFq4muTkLZmKlGk4gyFAYUBMH30+HurREyA==", + "dev": true, + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-minify-gradients": { + "version": "5.1.1", + "resolved": "https://registry.npmmirror.com/postcss-minify-gradients/-/postcss-minify-gradients-5.1.1.tgz", + "integrity": "sha512-VGvXMTpCEo4qHTNSa9A0a3D+dxGFZCYwR6Jokk+/3oB6flu2/PnPXAh2x7x52EkY5xlIHLm+Le8tJxe/7TNhzw==", + "dev": true, + "dependencies": { + "colord": "^2.9.1", + "cssnano-utils": "^3.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-minify-params": { + "version": "5.1.4", + "resolved": "https://registry.npmmirror.com/postcss-minify-params/-/postcss-minify-params-5.1.4.tgz", + "integrity": "sha512-+mePA3MgdmVmv6g+30rn57USjOGSAyuxUmkfiWpzalZ8aiBkdPYjXWtHuwJGm1v5Ojy0Z0LaSYhHaLJQB0P8Jw==", + "dev": true, + "dependencies": { + "browserslist": "^4.21.4", + "cssnano-utils": "^3.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-minify-selectors": { + "version": "5.2.1", + "resolved": "https://registry.npmmirror.com/postcss-minify-selectors/-/postcss-minify-selectors-5.2.1.tgz", + "integrity": "sha512-nPJu7OjZJTsVUmPdm2TcaiohIwxP+v8ha9NehQ2ye9szv4orirRU3SDdtUmKH+10nzn0bAyOXZ0UEr7OpvLehg==", + "dev": true, + "dependencies": { + "postcss-selector-parser": "^6.0.5" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-modules-extract-imports": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.1.0.tgz", + "integrity": "sha512-k3kNe0aNFQDAZGbin48pL2VNidTF0w4/eASDsxlyspobzU3wZQLOGj7L9gfRe0Jo9/4uud09DsjFNH7winGv8Q==", + "dev": true, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-local-by-default": { + "version": "4.0.5", + "resolved": "https://registry.npmmirror.com/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.5.tgz", + "integrity": "sha512-6MieY7sIfTK0hYfafw1OMEG+2bg8Q1ocHCpoWLqOKj3JXlKu4G7btkmM/B7lFubYkYWmRSPLZi5chid63ZaZYw==", + "dev": true, + "dependencies": { + "icss-utils": "^5.0.0", + "postcss-selector-parser": "^6.0.2", + "postcss-value-parser": "^4.1.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + 
"node_modules/postcss-modules-scope": { + "version": "3.2.0", + "resolved": "https://registry.npmmirror.com/postcss-modules-scope/-/postcss-modules-scope-3.2.0.tgz", + "integrity": "sha512-oq+g1ssrsZOsx9M96c5w8laRmvEu9C3adDSjI8oTcbfkrTE8hx/zfyobUoWIxaKPO8bt6S62kxpw5GqypEw1QQ==", + "dev": true, + "dependencies": { + "postcss-selector-parser": "^6.0.4" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-values": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz", + "integrity": "sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==", + "dev": true, + "dependencies": { + "icss-utils": "^5.0.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-normalize-charset": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/postcss-normalize-charset/-/postcss-normalize-charset-5.1.0.tgz", + "integrity": "sha512-mSgUJ+pd/ldRGVx26p2wz9dNZ7ji6Pn8VWBajMXFf8jk7vUoSrZ2lt/wZR7DtlZYKesmZI680qjr2CeFF2fbUg==", + "dev": true, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-display-values": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/postcss-normalize-display-values/-/postcss-normalize-display-values-5.1.0.tgz", + "integrity": "sha512-WP4KIM4o2dazQXWmFaqMmcvsKmhdINFblgSeRgn8BJ6vxaMyaJkwAzpPpuvSIoG/rmX3M+IrRZEz2H0glrQNEA==", + "dev": true, + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-positions": { + "version": "5.1.1", + "resolved": "https://registry.npmmirror.com/postcss-normalize-positions/-/postcss-normalize-positions-5.1.1.tgz", + "integrity": "sha512-6UpCb0G4eofTCQLFVuI3EVNZzBNPiIKcA1AKVka+31fTVySphr3VUgAIULBhxZkKgwLImhzMR2Bw1ORK+37INg==", + "dev": true, + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-repeat-style": { + "version": "5.1.1", + "resolved": "https://registry.npmmirror.com/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-5.1.1.tgz", + "integrity": "sha512-mFpLspGWkQtBcWIRFLmewo8aC3ImN2i/J3v8YCFUwDnPu3Xz4rLohDO26lGjwNsQxB3YF0KKRwspGzE2JEuS0g==", + "dev": true, + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-string": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/postcss-normalize-string/-/postcss-normalize-string-5.1.0.tgz", + "integrity": "sha512-oYiIJOf4T9T1N4i+abeIc7Vgm/xPCGih4bZz5Nm0/ARVJ7K6xrDlLwvwqOydvyL3RHNf8qZk6vo3aatiw/go3w==", + "dev": true, + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-timing-functions": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-5.1.0.tgz", + "integrity": 
"sha512-DOEkzJ4SAXv5xkHl0Wa9cZLF3WCBhF3o1SKVxKQAa+0pYKlueTpCgvkFAHfk+Y64ezX9+nITGrDZeVGgITJXjg==", + "dev": true, + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-unicode": { + "version": "5.1.1", + "resolved": "https://registry.npmmirror.com/postcss-normalize-unicode/-/postcss-normalize-unicode-5.1.1.tgz", + "integrity": "sha512-qnCL5jzkNUmKVhZoENp1mJiGNPcsJCs1aaRmURmeJGES23Z/ajaln+EPTD+rBeNkSryI+2WTdW+lwcVdOikrpA==", + "dev": true, + "dependencies": { + "browserslist": "^4.21.4", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-url": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/postcss-normalize-url/-/postcss-normalize-url-5.1.0.tgz", + "integrity": "sha512-5upGeDO+PVthOxSmds43ZeMeZfKH+/DKgGRD7TElkkyS46JXAUhMzIKiCa7BabPeIy3AQcTkXwVVN7DbqsiCew==", + "dev": true, + "dependencies": { + "normalize-url": "^6.0.1", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-url/node_modules/normalize-url": { + "version": "6.1.0", + "resolved": "https://registry.npmmirror.com/normalize-url/-/normalize-url-6.1.0.tgz", + "integrity": "sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/postcss-normalize-whitespace": { + "version": "5.1.1", + "resolved": "https://registry.npmmirror.com/postcss-normalize-whitespace/-/postcss-normalize-whitespace-5.1.1.tgz", + "integrity": "sha512-83ZJ4t3NUDETIHTa3uEg6asWjSBYL5EdkVB0sDncx9ERzOKBVJIUeDO9RyA9Zwtig8El1d79HBp0JEi8wvGQnA==", + "dev": true, + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-ordered-values": { + "version": "5.1.3", + "resolved": "https://registry.npmmirror.com/postcss-ordered-values/-/postcss-ordered-values-5.1.3.tgz", + "integrity": "sha512-9UO79VUhPwEkzbb3RNpqqghc6lcYej1aveQteWY+4POIwlqkYE21HKWaLDF6lWNuqCobEAyTovVhtI32Rbv2RQ==", + "dev": true, + "dependencies": { + "cssnano-utils": "^3.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-reduce-initial": { + "version": "5.1.2", + "resolved": "https://registry.npmmirror.com/postcss-reduce-initial/-/postcss-reduce-initial-5.1.2.tgz", + "integrity": "sha512-dE/y2XRaqAi6OvjzD22pjTUQ8eOfc6m/natGHgKFBK9DxFmIm69YmaRVQrGgFlEfc1HePIurY0TmDeROK05rIg==", + "dev": true, + "dependencies": { + "browserslist": "^4.21.4", + "caniuse-api": "^3.0.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-reduce-transforms": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/postcss-reduce-transforms/-/postcss-reduce-transforms-5.1.0.tgz", + "integrity": "sha512-2fbdbmgir5AvpW9RLtdONx1QoYG2/EtqpNQbFASDlixBbAYuTcJ0dECwlqNqH7VbaUnEnh8SrxOe2sRIn24XyQ==", + "dev": true, + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + 
"engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.0.16", + "resolved": "https://registry.npmmirror.com/postcss-selector-parser/-/postcss-selector-parser-6.0.16.tgz", + "integrity": "sha512-A0RVJrX+IUkVZbW3ClroRWurercFhieevHB38sr2+l9eUClMqome3LmEmnhlNy+5Mr2EYN6B2Kaw9wYdd+VHiw==", + "dev": true, + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-svgo": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/postcss-svgo/-/postcss-svgo-5.1.0.tgz", + "integrity": "sha512-D75KsH1zm5ZrHyxPakAxJWtkyXew5qwS70v56exwvw542d9CRtTo78K0WeFxZB4G7JXKKMbEZtZayTGdIky/eA==", + "dev": true, + "dependencies": { + "postcss-value-parser": "^4.2.0", + "svgo": "^2.7.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-unique-selectors": { + "version": "5.1.1", + "resolved": "https://registry.npmmirror.com/postcss-unique-selectors/-/postcss-unique-selectors-5.1.1.tgz", + "integrity": "sha512-5JiODlELrz8L2HwxfPnhOWZYWDxVHWL83ufOv84NrcgipI7TaeRsatAhK4Tr2/ZiYldpK/wBvw5BD3qfaK96GA==", + "dev": true, + "dependencies": { + "postcss-selector-parser": "^6.0.5" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmmirror.com/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", + "dev": true + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmmirror.com/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/prepend-http": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/prepend-http/-/prepend-http-2.0.0.tgz", + "integrity": "sha512-ravE6m9Atw9Z/jjttRUZ+clIXogdghyZAuWJ3qEzjT+jI/dL1ifAqhZeC5VHzQp1MSt1+jxKkFNemj/iO7tVUA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/prettier": { + "version": "2.8.8", + "resolved": "https://registry.npmmirror.com/prettier/-/prettier-2.8.8.tgz", + "integrity": "sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q==", + "optional": true, + "bin": { + "prettier": "bin-prettier.js" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, + "node_modules/pretty": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/pretty/-/pretty-2.0.0.tgz", + "integrity": "sha512-G9xUchgTEiNpormdYBl+Pha50gOUovT18IvAe7EYMZ1/f9W/WWMPRn+xI68yXNMUk3QXHDwo/1wV/4NejVNe1w==", + "dev": true, + "dependencies": { + "condense-newlines": "^0.2.1", + "extend-shallow": "^2.0.1", + "js-beautify": "^1.6.12" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pretty-error": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/pretty-error/-/pretty-error-4.0.0.tgz", + "integrity": "sha512-AoJ5YMAcXKYxKhuJGdcvse+Voc6v1RgnsR3nWcYU7q4t6z0Q6T86sv5Zq8VIRbOWWFpvdGE83LtdSMNd+6Y0xw==", + "dev": true, + "dependencies": { + "lodash": "^4.17.20", + "renderkid": "^3.0.0" + 
} + }, + "node_modules/pretty-format": { + "version": "27.5.1", + "resolved": "https://registry.npmmirror.com/pretty-format/-/pretty-format-27.5.1.tgz", + "integrity": "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1", + "ansi-styles": "^5.0.0", + "react-is": "^17.0.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/prismjs": { + "version": "1.29.0", + "resolved": "https://registry.npmmirror.com/prismjs/-/prismjs-1.29.0.tgz", + "integrity": "sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==", + "engines": { + "node": ">=6" + } + }, + "node_modules/private": { + "version": "0.1.8", + "resolved": "https://registry.npmmirror.com/private/-/private-0.1.8.tgz", + "integrity": "sha512-VvivMrbvd2nKkiG38qjULzlc+4Vx4wm/whI9pQD35YrARNnhxeiRktSOhSukRLFNlzg6Br/cJPet5J/u19r/mg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/process": { + "version": "0.11.10", + "resolved": "https://registry.npmmirror.com/process/-/process-0.11.10.tgz", + "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==", + "peer": true, + "engines": { + "node": ">= 0.6.0" + } + }, + "node_modules/process-exists": { + "version": "4.1.0", + "resolved": "https://registry.npmmirror.com/process-exists/-/process-exists-4.1.0.tgz", + "integrity": "sha512-BBJoiorUKoP2AuM5q/yKwIfT1YWRHsaxjW+Ayu9erLhqKOfnXzzVVML0XTYoQZuI1YvcWKmc1dh06DEy4+KzfA==", + "dependencies": { + "ps-list": "^6.3.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/process-exists/node_modules/ps-list": { + "version": "6.3.0", + "resolved": "https://registry.npmmirror.com/ps-list/-/ps-list-6.3.0.tgz", + "integrity": "sha512-qau0czUSB0fzSlBOQt0bo+I2v6R+xiQdj78e1BR/Qjfl5OHWJ/urXi8+ilw1eHe+5hSeDI1wrwVTgDp2wst4oA==", + "engines": { + "node": ">=8" + } + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + }, + "node_modules/progress": { + "version": "2.0.3", + "resolved": "https://registry.npmmirror.com/progress/-/progress-2.0.3.tgz", + "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", + "dev": true, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/progress-webpack-plugin": { + "version": "1.0.16", + "resolved": "https://registry.npmmirror.com/progress-webpack-plugin/-/progress-webpack-plugin-1.0.16.tgz", + "integrity": "sha512-sdiHuuKOzELcBANHfrupYo+r99iPRyOnw15qX+rNlVUqXGfjXdH4IgxriKwG1kNJwVswKQHMdj1hYZMcb9jFaA==", + "dev": true, + "dependencies": { + "chalk": "^2.1.0", + "figures": "^2.0.0", + "log-update": "^2.3.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "peerDependencies": { + "webpack": "^2.0.0 || 
^3.0.0 || ^4.0.0 || ^5.0.0" + } + }, + "node_modules/progress-webpack-plugin/node_modules/figures": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/figures/-/figures-2.0.0.tgz", + "integrity": "sha512-Oa2M9atig69ZkfwiApY8F2Yy+tzMbazyvqv21R0NsSC8floSOC09BbT1ITWAdoMGQvJ/aZnR1KMwdx9tvHnTNA==", + "dev": true, + "dependencies": { + "escape-string-regexp": "^1.0.5" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/promise-inflight": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/promise-inflight/-/promise-inflight-1.0.1.tgz", + "integrity": "sha512-6zWPyEOFaQBJYcGMHBKTKJ3u6TBsnMFOIZSa6ce1e/ZrrsOlnHRHbabMjLiBYKp+n44X9eUI6VUPaukCXHuG4g==", + "peer": true + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmmirror.com/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dev": true, + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/proto-list": { + "version": "1.2.4", + "resolved": "https://registry.npmmirror.com/proto-list/-/proto-list-1.2.4.tgz", + "integrity": "sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==" + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmmirror.com/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" + }, + "node_modules/prr": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/prr/-/prr-1.0.1.tgz", + "integrity": "sha512-yPw4Sng1gWghHQWj0B3ZggWUm4qVbPwPFcRG8KyxiU7J2OHFSoEHKS+EZ3fv5l1t9CyCiop6l/ZYeWbrgoQejw==", + "peer": true + }, + "node_modules/ps-list": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/ps-list/-/ps-list-7.2.0.tgz", + "integrity": "sha512-v4Bl6I3f2kJfr5o80ShABNHAokIgY+wFDTQfE+X3zWYgSGQOCBeYptLZUpoOALBqO5EawmDN/tjTldJesd0ujQ==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pseudomap": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/pseudomap/-/pseudomap-1.0.2.tgz", + "integrity": "sha512-b/YwNhb8lk1Zz2+bXXpS/LK9OisiZZ1SNsSLxN1x2OXVEhW2Ckr/7mWE5vrC1ZTiJlD9g19jWszTmJsB+oEpFQ==", + "dev": true + }, + "node_modules/psl": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/psl/-/psl-1.9.0.tgz", + "integrity": "sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==", + "dev": true + }, + "node_modules/public-encrypt": { + "version": "4.0.3", + "resolved": "https://registry.npmmirror.com/public-encrypt/-/public-encrypt-4.0.3.tgz", + "integrity": "sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q==", + "peer": true, + "dependencies": { + "bn.js": "^4.1.0", + "browserify-rsa": "^4.0.0", + "create-hash": "^1.1.0", + "parse-asn1": "^5.0.0", + "randombytes": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + 
"node_modules/public-encrypt/node_modules/bn.js": { + "version": "4.12.0", + "resolved": "https://registry.npmmirror.com/bn.js/-/bn.js-4.12.0.tgz", + "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==", + "peer": true + }, + "node_modules/pump": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/pump/-/pump-3.0.0.tgz", + "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "node_modules/pumpify": { + "version": "1.5.1", + "resolved": "https://registry.npmmirror.com/pumpify/-/pumpify-1.5.1.tgz", + "integrity": "sha512-oClZI37HvuUJJxSKKrC17bZ9Cu0ZYhEAGPsPUy9KlMUmv9dKX2o77RUmq7f3XjIxbwyGwYzbzQ1L2Ks8sIradQ==", + "peer": true, + "dependencies": { + "duplexify": "^3.6.0", + "inherits": "^2.0.3", + "pump": "^2.0.0" + } + }, + "node_modules/pumpify/node_modules/pump": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/pump/-/pump-2.0.1.tgz", + "integrity": "sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==", + "peer": true, + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmmirror.com/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/qs": { + "version": "6.11.0", + "resolved": "https://registry.npmmirror.com/qs/-/qs-6.11.0.tgz", + "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "dependencies": { + "side-channel": "^1.0.4" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/query-string": { + "version": "5.1.1", + "resolved": "https://registry.npmmirror.com/query-string/-/query-string-5.1.1.tgz", + "integrity": "sha512-gjWOsm2SoGlgLEdAGt7a6slVOk9mGiXmPFMqrEhLQ68rhQuBnpfs3+EmlvqKyxnCo9/PPlF+9MtY02S1aFg+Jw==", + "dependencies": { + "decode-uri-component": "^0.2.0", + "object-assign": "^4.1.0", + "strict-uri-encode": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/querystring-es3": { + "version": "0.2.1", + "resolved": "https://registry.npmmirror.com/querystring-es3/-/querystring-es3-0.2.1.tgz", + "integrity": "sha512-773xhDQnZBMFobEiztv8LIl70ch5MSF/jUQVlhwFyBILqq96anmoctVIYz+ZRp0qbCKATTn6ev02M3r7Ga5vqA==", + "peer": true, + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/querystringify": { + "version": "2.2.0", + "resolved": "https://registry.npmmirror.com/querystringify/-/querystringify-2.2.0.tgz", + "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==", + "dev": true + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmmirror.com/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/randombytes": { + "version": "2.1.0", + 
"resolved": "https://registry.npmmirror.com/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/randomfill": { + "version": "1.0.4", + "resolved": "https://registry.npmmirror.com/randomfill/-/randomfill-1.0.4.tgz", + "integrity": "sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==", + "peer": true, + "dependencies": { + "randombytes": "^2.0.5", + "safe-buffer": "^5.1.0" + } + }, + "node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmmirror.com/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.5.2", + "resolved": "https://registry.npmmirror.com/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", + "dependencies": { + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/raw-loader": { + "version": "4.0.2", + "resolved": "https://registry.npmmirror.com/raw-loader/-/raw-loader-4.0.2.tgz", + "integrity": "sha512-ZnScIV3ag9A4wPX/ZayxL/jZH+euYb6FcUinPcgiQW0+UBtEv0O6Q3lGd3cqJ+GHH+rksEv3Pj99oxJ3u3VIKA==", + "dependencies": { + "loader-utils": "^2.0.0", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/raw-loader/node_modules/loader-utils": { + "version": "2.0.4", + "resolved": "https://registry.npmmirror.com/loader-utils/-/loader-utils-2.0.4.tgz", + "integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==", + "dependencies": { + "big.js": "^5.2.2", + "emojis-list": "^3.0.0", + "json5": "^2.1.2" + }, + "engines": { + "node": ">=8.9.0" + } + }, + "node_modules/raw-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmmirror.com/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/react-is": { + "version": "17.0.2", + "resolved": "https://registry.npmmirror.com/react-is/-/react-is-17.0.2.tgz", + "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", + "dev": true + }, + "node_modules/read-pkg": { + "version": "5.2.0", + "resolved": "https://registry.npmmirror.com/read-pkg/-/read-pkg-5.2.0.tgz", + "integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==", + "dependencies": { + "@types/normalize-package-data": "^2.4.0", + "normalize-package-data": "^2.5.0", + "parse-json": "^5.0.0", + "type-fest": "^0.6.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/read-pkg-up": { + "version": "7.0.1", + "resolved": 
"https://registry.npmmirror.com/read-pkg-up/-/read-pkg-up-7.0.1.tgz", + "integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==", + "dev": true, + "dependencies": { + "find-up": "^4.1.0", + "read-pkg": "^5.2.0", + "type-fest": "^0.8.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/read-pkg-up/node_modules/type-fest": { + "version": "0.8.1", + "resolved": "https://registry.npmmirror.com/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/read-pkg/node_modules/type-fest": { + "version": "0.6.0", + "resolved": "https://registry.npmmirror.com/type-fest/-/type-fest-0.6.0.tgz", + "integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmmirror.com/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/readable-stream/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmmirror.com/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmmirror.com/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "devOptional": true, + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/recast": { + "version": "0.20.5", + "resolved": "https://registry.npmmirror.com/recast/-/recast-0.20.5.tgz", + "integrity": "sha512-E5qICoPoNL4yU0H0NoBDntNB0Q5oMSNh9usFctYniLBluTthi3RsQVBXIJNbApOlvSwW/RGxIuokPcAc59J5fQ==", + "dependencies": { + "ast-types": "0.14.2", + "esprima": "~4.0.0", + "source-map": "~0.6.1", + "tslib": "^2.0.1" + }, + "engines": { + "node": ">= 4" + } + }, + "node_modules/rechoir": { + "version": "0.8.0", + "resolved": "https://registry.npmmirror.com/rechoir/-/rechoir-0.8.0.tgz", + "integrity": "sha512-/vxpCXddiX8NGfGO/mTafwjq4aFa/71pvamip0++IQk3zG8cbCj0fifNPrjjF1XMXUne91jL9OoxmdykoEtifQ==", + "dependencies": { + "resolve": "^1.20.0" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/regenerate": { + "version": "1.4.2", + "resolved": "https://registry.npmmirror.com/regenerate/-/regenerate-1.4.2.tgz", + "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==" + }, + "node_modules/regenerate-unicode-properties": { + "version": "10.1.1", + "resolved": "https://registry.npmmirror.com/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.1.tgz", + "integrity": "sha512-X007RyZLsCJVVrjgEFVpLUTZwyOZk3oiL75ZcuYjlIWd6rNJtOjkBwQc5AsRrpbKVkxN6sklw/k/9m2jJYOf8Q==", + "dependencies": { + "regenerate": "^1.4.2" + }, + "engines": { + "node": ">=4" + 
} + }, + "node_modules/regenerator-runtime": { + "version": "0.14.1", + "resolved": "https://registry.npmmirror.com/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", + "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" + }, + "node_modules/regenerator-transform": { + "version": "0.15.2", + "resolved": "https://registry.npmmirror.com/regenerator-transform/-/regenerator-transform-0.15.2.tgz", + "integrity": "sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg==", + "dependencies": { + "@babel/runtime": "^7.8.4" + } + }, + "node_modules/regex-not": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/regex-not/-/regex-not-1.0.2.tgz", + "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==", + "dependencies": { + "extend-shallow": "^3.0.2", + "safe-regex": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/regex-not/node_modules/extend-shallow": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/extend-shallow/-/extend-shallow-3.0.2.tgz", + "integrity": "sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==", + "dependencies": { + "assign-symbols": "^1.0.0", + "is-extendable": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/regex-not/node_modules/is-extendable": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/is-extendable/-/is-extendable-1.0.1.tgz", + "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "dependencies": { + "is-plain-object": "^2.0.4" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/regexpp": { + "version": "3.2.0", + "resolved": "https://registry.npmmirror.com/regexpp/-/regexpp-3.2.0.tgz", + "integrity": "sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/mysticatea" + } + }, + "node_modules/regexpu-core": { + "version": "5.3.2", + "resolved": "https://registry.npmmirror.com/regexpu-core/-/regexpu-core-5.3.2.tgz", + "integrity": "sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==", + "dependencies": { + "@babel/regjsgen": "^0.8.0", + "regenerate": "^1.4.2", + "regenerate-unicode-properties": "^10.1.0", + "regjsparser": "^0.9.1", + "unicode-match-property-ecmascript": "^2.0.0", + "unicode-match-property-value-ecmascript": "^2.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/regjsparser": { + "version": "0.9.1", + "resolved": "https://registry.npmmirror.com/regjsparser/-/regjsparser-0.9.1.tgz", + "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==", + "dependencies": { + "jsesc": "~0.5.0" + }, + "bin": { + "regjsparser": "bin/parser" + } + }, + "node_modules/regjsparser/node_modules/jsesc": { + "version": "0.5.0", + "resolved": "https://registry.npmmirror.com/jsesc/-/jsesc-0.5.0.tgz", + "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==", + "bin": { + "jsesc": "bin/jsesc" + } + }, + "node_modules/relateurl": { + "version": "0.2.7", + "resolved": "https://registry.npmmirror.com/relateurl/-/relateurl-0.2.7.tgz", + "integrity": 
"sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog==", + "dev": true, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/remove-trailing-separator": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz", + "integrity": "sha512-/hS+Y0u3aOfIETiaiirUFwDBDzmXPvO+jAfKTitUngIPzdKc6Z0LoFjM/CK5PL4C+eKwHohlHAb6H0VFfmmUsw==", + "optional": true, + "peer": true + }, + "node_modules/renderkid": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/renderkid/-/renderkid-3.0.0.tgz", + "integrity": "sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg==", + "dev": true, + "dependencies": { + "css-select": "^4.1.3", + "dom-converter": "^0.2.0", + "htmlparser2": "^6.1.0", + "lodash": "^4.17.21", + "strip-ansi": "^6.0.1" + } + }, + "node_modules/repeat-element": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/repeat-element/-/repeat-element-1.1.4.tgz", + "integrity": "sha512-LFiNfRcSu7KK3evMyYOuCzv3L10TW7yC1G2/+StMjK8Y6Vqd2MG7r/Qjw4ghtuCOjFvlnms/iMmLqpvW/ES/WQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/repeat-string": { + "version": "1.6.1", + "resolved": "https://registry.npmmirror.com/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmmirror.com/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", + "dev": true + }, + "node_modules/resize-observer-polyfill": { + "version": "1.5.1", + "resolved": "https://registry.npmmirror.com/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz", + "integrity": "sha512-LwZrotdHOo12nQuZlHEmtuXdqGoOD0OhaxopaNFxWzInpEgaLWoVuAMbTzixuosCx2nEG58ngzW3vxdWoxIgdg==" + }, + "node_modules/resolve": { + "version": "1.22.8", + "resolved": "https://registry.npmmirror.com/resolve/-/resolve-1.22.8.tgz", + "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-cwd": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/resolve-cwd/-/resolve-cwd-3.0.0.tgz", + "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", + "dependencies": { + "resolve-from": "^5.0.0" + }, + "engines": { + "node": 
">=8" + } + }, + "node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-url": { + "version": "0.2.1", + "resolved": "https://registry.npmmirror.com/resolve-url/-/resolve-url-0.2.1.tgz", + "integrity": "sha512-ZuF55hVUQaaczgOIwqWzkEcEidmlD/xl44x1UZnhOXcYuFN2S6+rcxpG+C1N3So0wvNI3DmJICUFfu2SxhBmvg==", + "deprecated": "https://github.com/lydell/resolve-url#deprecated" + }, + "node_modules/resolve.exports": { + "version": "1.1.1", + "resolved": "https://registry.npmmirror.com/resolve.exports/-/resolve.exports-1.1.1.tgz", + "integrity": "sha512-/NtpHNDN7jWhAaQ9BvBUYZ6YTXsRBgfqWFWP7BZBaoMJO/I3G5OFzvTuWNlZC3aPjins1F+TNrLKsGbH4rfsRQ==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/responselike": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/responselike/-/responselike-1.0.2.tgz", + "integrity": "sha512-/Fpe5guzJk1gPqdJLJR5u7eG/gNY4nImjbRDaVWVMRhne55TCmj2i9Q+54PBRfatRC8v/rIiv9BN0pMd9OV5EQ==", + "dependencies": { + "lowercase-keys": "^1.0.0" + } + }, + "node_modules/restore-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/restore-cursor/-/restore-cursor-3.1.0.tgz", + "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", + "dependencies": { + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ret": { + "version": "0.1.15", + "resolved": "https://registry.npmmirror.com/ret/-/ret-0.1.15.tgz", + "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==", + "engines": { + "node": ">=0.12" + } + }, + "node_modules/retry": { + "version": "0.13.1", + "resolved": "https://registry.npmmirror.com/retry/-/retry-0.13.1.tgz", + "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmmirror.com/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/ripemd160": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/ripemd160/-/ripemd160-2.0.2.tgz", + "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==", + "peer": true, + "dependencies": { + "hash-base": "^3.0.0", + "inherits": "^2.0.1" + } + }, + "node_modules/rss-parser": { + "version": "3.13.0", + "resolved": "https://registry.npmmirror.com/rss-parser/-/rss-parser-3.13.0.tgz", + "integrity": "sha512-7jWUBV5yGN3rqMMj7CZufl/291QAhvrrGpDNE4k/02ZchL0npisiYYqULF71jCEKoIiHvK/Q2e6IkDwPziT7+w==", + "dependencies": { + "entities": "^2.0.3", + "xml2js": "^0.5.0" + } + }, + 
"node_modules/run-async": { + "version": "2.4.1", + "resolved": "https://registry.npmmirror.com/run-async/-/run-async-2.4.1.tgz", + "integrity": "sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/run-queue": { + "version": "1.0.3", + "resolved": "https://registry.npmmirror.com/run-queue/-/run-queue-1.0.3.tgz", + "integrity": "sha512-ntymy489o0/QQplUDnpYAYUsO50K9SBrIVaKCWDOJzYJts0f9WH9RFJkyagebkw5+y1oi00R7ynNW/d12GBumg==", + "peer": true, + "dependencies": { + "aproba": "^1.1.1" + } + }, + "node_modules/rxjs": { + "version": "7.8.1", + "resolved": "https://registry.npmmirror.com/rxjs/-/rxjs-7.8.1.tgz", + "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==", + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmmirror.com/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safe-regex": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/safe-regex/-/safe-regex-1.1.0.tgz", + "integrity": "sha512-aJXcif4xnaNUzvUuC5gcb46oTS7zvg4jpMTnuqtrEPlR3vFr4pxtdTwaF1Qs3Enjn9HK+ZlwQui+a7z0SywIzg==", + "dependencies": { + "ret": "~0.1.10" + } + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmmirror.com/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "node_modules/sax": { + "version": "1.2.4", + "resolved": "https://registry.npmmirror.com/sax/-/sax-1.2.4.tgz", + "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==" + }, + "node_modules/saxes": { + "version": "5.0.1", + "resolved": "https://registry.npmmirror.com/saxes/-/saxes-5.0.1.tgz", + "integrity": "sha512-5LBh1Tls8c9xgGjw3QrMwETmTMVk0oFgvrFSvWx62llR2hcEInrKNZ2GZCCuuy2lvWrdl5jhbpeqc5hRYKFOcw==", + "dev": true, + "dependencies": { + "xmlchars": "^2.2.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/schema-utils": { + "version": "2.7.1", + "resolved": "https://registry.npmmirror.com/schema-utils/-/schema-utils-2.7.1.tgz", + "integrity": "sha512-SHiNtMOUGWBQJwzISiVYKu82GiV4QYGePp3odlY1tuKO7gPtphAT5R/py0fA6xtbgLL/RvtJZnU9b8s0F1q0Xg==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.5", + "ajv": "^6.12.4", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 8.9.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/webpack" + } + }, + "node_modules/scroll-into-view-if-needed": { + "version": "2.2.31", + "resolved": "https://registry.npmmirror.com/scroll-into-view-if-needed/-/scroll-into-view-if-needed-2.2.31.tgz", + "integrity": "sha512-dGCXy99wZQivjmjIqihaBQNjryrz5rueJY7eHfTdyWEiR4ttYpsajb14rn9s5d4DY4EcY6+4+U/maARBXJedkA==", + "dependencies": { + "compute-scroll-into-view": "^1.0.20" + } + }, + "node_modules/seek-bzip": { + "version": "1.0.6", + "resolved": "https://registry.npmmirror.com/seek-bzip/-/seek-bzip-1.0.6.tgz", + "integrity": "sha512-e1QtP3YL5tWww8uKaOCQ18UxIT2laNBXHjV/S2WYCiK4udiv8lkG89KRIoCjUagnAmCBurjF4zEVX2ByBbnCjQ==", + "dependencies": { + "commander": "^2.8.1" + }, + "bin": { + "seek-bunzip": "bin/seek-bunzip", + "seek-table": "bin/seek-bzip-table" + } + }, + "node_modules/seek-bzip/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmmirror.com/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" + }, + "node_modules/select-hose": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/select-hose/-/select-hose-2.0.0.tgz", + "integrity": "sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg==", + "dev": true + }, + "node_modules/selfsigned": { + "version": "2.4.1", + "resolved": "https://registry.npmmirror.com/selfsigned/-/selfsigned-2.4.1.tgz", + "integrity": "sha512-th5B4L2U+eGLq1TVh7zNRGBapioSORUeymIydxgFpwww9d2qyKvtuPU2jJuHvYAwwqi2Y596QBL3eEqcPEYL8Q==", + "dev": true, + "dependencies": { + "@types/node-forge": "^1.3.0", + "node-forge": "^1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmmirror.com/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/send": { + "version": "0.18.0", + "resolved": "https://registry.npmmirror.com/send/-/send-0.18.0.tgz", + "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/send/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmmirror.com/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/send/node_modules/debug/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/send/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmmirror.com/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "node_modules/serialize-javascript": { + "version": "6.0.2", + "resolved": 
"https://registry.npmmirror.com/serialize-javascript/-/serialize-javascript-6.0.2.tgz", + "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/serve-index": { + "version": "1.9.1", + "resolved": "https://registry.npmmirror.com/serve-index/-/serve-index-1.9.1.tgz", + "integrity": "sha512-pXHfKNP4qujrtteMrSBb0rc8HJ9Ms/GrXwcUtUtD5s4ewDJI8bT3Cz2zTVRMKtri49pLx2e0Ya8ziP5Ya2pZZw==", + "dev": true, + "dependencies": { + "accepts": "~1.3.4", + "batch": "0.6.1", + "debug": "2.6.9", + "escape-html": "~1.0.3", + "http-errors": "~1.6.2", + "mime-types": "~2.1.17", + "parseurl": "~1.3.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/serve-index/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmmirror.com/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dev": true, + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/serve-index/node_modules/depd": { + "version": "1.1.2", + "resolved": "https://registry.npmmirror.com/depd/-/depd-1.1.2.tgz", + "integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==", + "dev": true, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-index/node_modules/http-errors": { + "version": "1.6.3", + "resolved": "https://registry.npmmirror.com/http-errors/-/http-errors-1.6.3.tgz", + "integrity": "sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==", + "dev": true, + "dependencies": { + "depd": "~1.1.2", + "inherits": "2.0.3", + "setprototypeof": "1.1.0", + "statuses": ">= 1.4.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-index/node_modules/inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmmirror.com/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==", + "dev": true + }, + "node_modules/serve-index/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "dev": true + }, + "node_modules/serve-index/node_modules/setprototypeof": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/setprototypeof/-/setprototypeof-1.1.0.tgz", + "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==", + "dev": true + }, + "node_modules/serve-index/node_modules/statuses": { + "version": "1.5.0", + "resolved": "https://registry.npmmirror.com/statuses/-/statuses-1.5.0.tgz", + "integrity": "sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==", + "dev": true, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-static": { + "version": "1.15.0", + "resolved": "https://registry.npmmirror.com/serve-static/-/serve-static-1.15.0.tgz", + "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "dependencies": { + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "0.18.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/set-blocking": { + "version": "2.0.0", + "resolved": 
"https://registry.npmmirror.com/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==", + "optional": true + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmmirror.com/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/set-value": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/set-value/-/set-value-2.0.1.tgz", + "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==", + "dependencies": { + "extend-shallow": "^2.0.1", + "is-extendable": "^0.1.1", + "is-plain-object": "^2.0.3", + "split-string": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/setimmediate": { + "version": "1.0.5", + "resolved": "https://registry.npmmirror.com/setimmediate/-/setimmediate-1.0.5.tgz", + "integrity": "sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA==", + "peer": true + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" + }, + "node_modules/sha.js": { + "version": "2.4.11", + "resolved": "https://registry.npmmirror.com/sha.js/-/sha.js-2.4.11.tgz", + "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", + "dependencies": { + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + }, + "bin": { + "sha.js": "bin.js" + } + }, + "node_modules/shallow-clone": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/shallow-clone/-/shallow-clone-3.0.1.tgz", + "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", + "dependencies": { + "kind-of": "^6.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shallow-clone/node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmmirror.com/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/shallow-equal": { + "version": "1.2.1", + "resolved": "https://registry.npmmirror.com/shallow-equal/-/shallow-equal-1.2.1.tgz", + "integrity": "sha512-S4vJDjHHMBaiZuT9NPb616CSmLf618jawtv3sufLl6ivK8WocjAo58cXwbRV1cgqxH0Qbv+iUt6m05eqEa2IRA==" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": 
"sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "engines": { + "node": ">=8" + } + }, + "node_modules/shell-quote": { + "version": "1.8.1", + "resolved": "https://registry.npmmirror.com/shell-quote/-/shell-quote-1.8.1.tgz", + "integrity": "sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/shellwords": { + "version": "0.1.1", + "resolved": "https://registry.npmmirror.com/shellwords/-/shellwords-0.1.1.tgz", + "integrity": "sha512-vFwSUfQvqybiICwZY5+DAWIPLKsWO31Q91JSKl3UYv+K5c2QRPzn0qzec6QPu1Qc9eHYItiP3NdJqNVqetYAww==" + }, + "node_modules/shortid": { + "version": "2.2.16", + "resolved": "https://registry.npmmirror.com/shortid/-/shortid-2.2.16.tgz", + "integrity": "sha512-Ugt+GIZqvGXCIItnsL+lvFJOiN7RYqlGy7QE41O3YC1xbNSeDGIRO7xg2JJXIAj1cAGnOeC1r7/T9pgrtQbv4g==", + "deprecated": "Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.", + "dependencies": { + "nanoid": "^2.1.0" + } + }, + "node_modules/shortid/node_modules/nanoid": { + "version": "2.1.11", + "resolved": "https://registry.npmmirror.com/nanoid/-/nanoid-2.1.11.tgz", + "integrity": "sha512-s/snB+WGm6uwi0WjsZdaVcuf3KJXlfGl2LcxgwkEwJF0D/BWzVWAZW/XY4bFaiR7s0Jk3FPvlnepg1H1b1UwlA==" + }, + "node_modules/side-channel": { + "version": "1.0.6", + "resolved": "https://registry.npmmirror.com/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", + "dependencies": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmmirror.com/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==" + }, + "node_modules/simple-concat": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/simple-concat/-/simple-concat-1.0.1.tgz", + "integrity": "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "optional": true + }, + "node_modules/simple-get": { + "version": "3.1.1", + "resolved": "https://registry.npmmirror.com/simple-get/-/simple-get-3.1.1.tgz", + "integrity": "sha512-CQ5LTKGfCpvE1K0n2us+kuMPbk/q0EKl82s4aheV9oXjFEz6W/Y7oQFVJuU6QG77hRT4Ghb5RURteF5vnWjupA==", + "optional": true, + "dependencies": { + "decompress-response": "^4.2.0", + "once": "^1.3.1", + "simple-concat": "^1.0.0" + } + }, + "node_modules/simple-get/node_modules/decompress-response": { + "version": "4.2.1", + "resolved": "https://registry.npmmirror.com/decompress-response/-/decompress-response-4.2.1.tgz", + "integrity": "sha512-jOSne2qbyE+/r8G1VU+G/82LBs2Fs4LAsTiLSHOCOMZQl2OKZ6i8i4IyHemTe+/yIXOtTcRQMzPcgyhoFlqPkw==", + "optional": true, + "dependencies": { + "mimic-response": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/simple-get/node_modules/mimic-response": { + "version": "2.1.0", + "resolved": 
"https://registry.npmmirror.com/mimic-response/-/mimic-response-2.1.0.tgz", + "integrity": "sha512-wXqjST+SLt7R009ySCglWBCFpjUygmCIfD790/kVbiGmUgfYGuB14PiTd5DwVxSV4NcYHjzMkoj5LjQZwTQLEA==", + "optional": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/sirv": { + "version": "2.0.4", + "resolved": "https://registry.npmmirror.com/sirv/-/sirv-2.0.4.tgz", + "integrity": "sha512-94Bdh3cC2PKrbgSOUqTiGPWVZeSiXfKOVZNJniWoqrWrRkB1CJzBU3NEbiTsPcYy1lDsANA/THzS+9WBiy5nfQ==", + "dev": true, + "dependencies": { + "@polka/url": "^1.0.0-next.24", + "mrmime": "^2.0.0", + "totalist": "^3.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmmirror.com/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + "dev": true + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/slice-ansi": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/slice-ansi/-/slice-ansi-4.0.0.tgz", + "integrity": "sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "astral-regex": "^2.0.0", + "is-fullwidth-code-point": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/slice-ansi?sponsor=1" + } + }, + "node_modules/slice-ansi/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/slice-ansi/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/slice-ansi/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/snapdragon": { + "version": "0.8.2", + "resolved": "https://registry.npmmirror.com/snapdragon/-/snapdragon-0.8.2.tgz", + "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==", + "dependencies": { + "base": "^0.11.1", + "debug": "^2.2.0", + "define-property": "^0.2.5", + "extend-shallow": "^2.0.1", + "map-cache": "^0.2.2", + "source-map": "^0.5.6", + "source-map-resolve": "^0.5.0", + "use": "^3.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/snapdragon-node": { + "version": "2.1.1", + "resolved": 
"https://registry.npmmirror.com/snapdragon-node/-/snapdragon-node-2.1.1.tgz", + "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==", + "dependencies": { + "define-property": "^1.0.0", + "isobject": "^3.0.0", + "snapdragon-util": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/snapdragon-node/node_modules/define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/define-property/-/define-property-1.0.0.tgz", + "integrity": "sha512-cZTYKFWspt9jZsMscWo8sc/5lbPC9Q0N5nBLgb+Yd915iL3udB1uFgS3B8YCx66UVHq018DAVFoee7x+gxggeA==", + "dependencies": { + "is-descriptor": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/snapdragon-util": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/snapdragon-util/-/snapdragon-util-3.0.1.tgz", + "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==", + "dependencies": { + "kind-of": "^3.2.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/snapdragon/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmmirror.com/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/snapdragon/node_modules/define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmmirror.com/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha512-Rr7ADjQZenceVOAKop6ALkkRAmH1A4Gx9hV/7ZujPUN2rkATqFO0JZLZInbAjpZYoJ1gUx8MRMQVkYemcbMSTA==", + "dependencies": { + "is-descriptor": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/snapdragon/node_modules/is-descriptor": { + "version": "0.1.7", + "resolved": "https://registry.npmmirror.com/is-descriptor/-/is-descriptor-0.1.7.tgz", + "integrity": "sha512-C3grZTvObeN1xud4cRWl366OMXZTj0+HGyk4hvfpx4ZHt1Pb60ANSXqCK7pdOTeUQpRzECBSTphqvD7U+l22Eg==", + "dependencies": { + "is-accessor-descriptor": "^1.0.1", + "is-data-descriptor": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/snapdragon/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/snapdragon/node_modules/source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmmirror.com/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/snapdragon/node_modules/source-map-resolve": { + "version": "0.5.3", + "resolved": "https://registry.npmmirror.com/source-map-resolve/-/source-map-resolve-0.5.3.tgz", + "integrity": "sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==", + "deprecated": "See https://github.com/lydell/source-map-resolve#deprecated", + "dependencies": { + "atob": "^2.1.2", + "decode-uri-component": "^0.2.0", + "resolve-url": "^0.2.1", + "source-map-url": "^0.4.0", + "urix": "^0.1.0" + } + }, + "node_modules/sockjs": { + "version": "0.3.24", + "resolved": "https://registry.npmmirror.com/sockjs/-/sockjs-0.3.24.tgz", + "integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==", + 
"dev": true, + "dependencies": { + "faye-websocket": "^0.11.3", + "uuid": "^8.3.2", + "websocket-driver": "^0.7.4" + } + }, + "node_modules/sockjs/node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmmirror.com/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "dev": true, + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/sort-keys": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/sort-keys/-/sort-keys-2.0.0.tgz", + "integrity": "sha512-/dPCrG1s3ePpWm6yBbxZq5Be1dXGLyLn9Z791chDC3NFrpkVbWGzkBwPN1knaciexFXgRJ7hzdnwZ4stHSDmjg==", + "dependencies": { + "is-plain-obj": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/sort-keys-length": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/sort-keys-length/-/sort-keys-length-1.0.1.tgz", + "integrity": "sha512-GRbEOUqCxemTAk/b32F2xa8wDTs+Z1QHOkbhJDQTvv/6G3ZkbJ+frYWsTcc7cBB3Fu4wy4XlLCuNtJuMn7Gsvw==", + "dependencies": { + "sort-keys": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/sort-keys-length/node_modules/sort-keys": { + "version": "1.1.2", + "resolved": "https://registry.npmmirror.com/sort-keys/-/sort-keys-1.1.2.tgz", + "integrity": "sha512-vzn8aSqKgytVik0iwdBEi+zevbTYZogewTUM6dtpmGwEcdzbub/TX4bCzRhebDCRC3QzXgJsLRKB2V/Oof7HXg==", + "dependencies": { + "is-plain-obj": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-list-map": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/source-list-map/-/source-list-map-2.0.1.tgz", + "integrity": "sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw==", + "peer": true + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmmirror.com/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-js": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/source-map-js/-/source-map-js-1.2.0.tgz", + "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-resolve": { + "version": "0.6.0", + "resolved": "https://registry.npmmirror.com/source-map-resolve/-/source-map-resolve-0.6.0.tgz", + "integrity": "sha512-KXBr9d/fO/bWo97NXsPIAW1bFSBOuCnjbNTBMO7N59hsv5i9yzRDfcYwwt0l04+VqnKC+EwzvJZIP/qkuMgR/w==", + "deprecated": "See https://github.com/lydell/source-map-resolve#deprecated", + "dev": true, + "dependencies": { + "atob": "^2.1.2", + "decode-uri-component": "^0.2.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.21", + "resolved": "https://registry.npmmirror.com/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/source-map-url": { + "version": "0.4.1", + "resolved": "https://registry.npmmirror.com/source-map-url/-/source-map-url-0.4.1.tgz", + "integrity": "sha512-cPiFOTLUKvJFIg4SKVScy4ilPPW6rFgMgfuZJPNoDuMs3nC1HbMUycBoJw77xFIp6z1UJQJOfx6C9GMH80DiTw==", + "deprecated": "See https://github.com/lydell/source-map-url#deprecated" + }, + 
"node_modules/spdx-correct": { + "version": "3.2.0", + "resolved": "https://registry.npmmirror.com/spdx-correct/-/spdx-correct-3.2.0.tgz", + "integrity": "sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==", + "dependencies": { + "spdx-expression-parse": "^3.0.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/spdx-exceptions": { + "version": "2.5.0", + "resolved": "https://registry.npmmirror.com/spdx-exceptions/-/spdx-exceptions-2.5.0.tgz", + "integrity": "sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w==" + }, + "node_modules/spdx-expression-parse": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", + "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", + "dependencies": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/spdx-license-ids": { + "version": "3.0.17", + "resolved": "https://registry.npmmirror.com/spdx-license-ids/-/spdx-license-ids-3.0.17.tgz", + "integrity": "sha512-sh8PWc/ftMqAAdFiBu6Fy6JUOYjqDJBJvIhpfDMyHrr0Rbp5liZqd4TjtQ/RgfLjKFZb+LMx5hpml5qOWy0qvg==" + }, + "node_modules/spdy": { + "version": "4.0.2", + "resolved": "https://registry.npmmirror.com/spdy/-/spdy-4.0.2.tgz", + "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", + "dev": true, + "dependencies": { + "debug": "^4.1.0", + "handle-thing": "^2.0.0", + "http-deceiver": "^1.2.7", + "select-hose": "^2.0.0", + "spdy-transport": "^3.0.0" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/spdy-transport": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/spdy-transport/-/spdy-transport-3.0.0.tgz", + "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", + "dev": true, + "dependencies": { + "debug": "^4.1.0", + "detect-node": "^2.0.4", + "hpack.js": "^2.1.6", + "obuf": "^1.1.2", + "readable-stream": "^3.0.6", + "wbuf": "^1.7.3" + } + }, + "node_modules/spdy-transport/node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmmirror.com/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dev": true, + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/split-string": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/split-string/-/split-string-3.1.0.tgz", + "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==", + "dependencies": { + "extend-shallow": "^3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/split-string/node_modules/extend-shallow": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/extend-shallow/-/extend-shallow-3.0.2.tgz", + "integrity": "sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==", + "dependencies": { + "assign-symbols": "^1.0.0", + "is-extendable": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/split-string/node_modules/is-extendable": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/is-extendable/-/is-extendable-1.0.1.tgz", 
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "dependencies": { + "is-plain-object": "^2.0.4" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmmirror.com/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==" + }, + "node_modules/ssri": { + "version": "8.0.1", + "resolved": "https://registry.npmmirror.com/ssri/-/ssri-8.0.1.tgz", + "integrity": "sha512-97qShzy1AiyxvPNIkLWoGua7xoQzzPjQ0HAH4B0rWKo7SZ6USuPcrUiAFrws0UH8RrbWmgq3LMTObhPIHbbBeQ==", + "dev": true, + "dependencies": { + "minipass": "^3.1.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/stable": { + "version": "0.1.8", + "resolved": "https://registry.npmmirror.com/stable/-/stable-0.1.8.tgz", + "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==", + "deprecated": "Modern JS already guarantees Array#sort() is a stable sort, so this library is deprecated. See the compatibility table on MDN: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/sort#browser_compatibility", + "dev": true + }, + "node_modules/stack-utils": { + "version": "2.0.6", + "resolved": "https://registry.npmmirror.com/stack-utils/-/stack-utils-2.0.6.tgz", + "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", + "dev": true, + "dependencies": { + "escape-string-regexp": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/stack-utils/node_modules/escape-string-regexp": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", + "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/stackframe": { + "version": "1.3.4", + "resolved": "https://registry.npmmirror.com/stackframe/-/stackframe-1.3.4.tgz", + "integrity": "sha512-oeVtt7eWQS+Na6F//S4kJ2K2VbRlS9D43mAlMyVpVWovy9o+jfgH8O9agzANzaiLjclA0oYzUXEM4PurhSUChw==", + "dev": true + }, + "node_modules/static-extend": { + "version": "0.1.2", + "resolved": "https://registry.npmmirror.com/static-extend/-/static-extend-0.1.2.tgz", + "integrity": "sha512-72E9+uLc27Mt718pMHt9VMNiAL4LMsmDbBva8mxWUCkT07fSzEGMYUCk0XWY6lp0j6RBAG4cJ3mWuZv2OE3s0g==", + "dependencies": { + "define-property": "^0.2.5", + "object-copy": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/static-extend/node_modules/define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmmirror.com/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha512-Rr7ADjQZenceVOAKop6ALkkRAmH1A4Gx9hV/7ZujPUN2rkATqFO0JZLZInbAjpZYoJ1gUx8MRMQVkYemcbMSTA==", + "dependencies": { + "is-descriptor": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/static-extend/node_modules/is-descriptor": { + "version": "0.1.7", + "resolved": "https://registry.npmmirror.com/is-descriptor/-/is-descriptor-0.1.7.tgz", + "integrity": "sha512-C3grZTvObeN1xud4cRWl366OMXZTj0+HGyk4hvfpx4ZHt1Pb60ANSXqCK7pdOTeUQpRzECBSTphqvD7U+l22Eg==", + "dependencies": { + "is-accessor-descriptor": "^1.0.1", + "is-data-descriptor": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/statuses": { + "version": 
"2.0.1", + "resolved": "https://registry.npmmirror.com/statuses/-/statuses-2.0.1.tgz", + "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/steno": { + "version": "0.4.4", + "resolved": "https://registry.npmmirror.com/steno/-/steno-0.4.4.tgz", + "integrity": "sha512-EEHMVYHNXFHfGtgjNITnka0aHhiAlo93F7z2/Pwd+g0teG9CnM3JIINM7hVVB5/rhw9voufD7Wukwgtw2uqh6w==", + "dependencies": { + "graceful-fs": "^4.1.3" + } + }, + "node_modules/stream-browserify": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/stream-browserify/-/stream-browserify-2.0.2.tgz", + "integrity": "sha512-nX6hmklHs/gr2FuxYDltq8fJA1GDlxKQCz8O/IM4atRqBH8OORmBNgfvW5gG10GT/qQ9u0CzIvr2X5Pkt6ntqg==", + "peer": true, + "dependencies": { + "inherits": "~2.0.1", + "readable-stream": "^2.0.2" + } + }, + "node_modules/stream-each": { + "version": "1.2.3", + "resolved": "https://registry.npmmirror.com/stream-each/-/stream-each-1.2.3.tgz", + "integrity": "sha512-vlMC2f8I2u/bZGqkdfLQW/13Zihpej/7PmSiMQsbYddxuTsJp8vRe2x2FvVExZg7FaOds43ROAuFJwPR4MTZLw==", + "peer": true, + "dependencies": { + "end-of-stream": "^1.1.0", + "stream-shift": "^1.0.0" + } + }, + "node_modules/stream-http": { + "version": "2.8.3", + "resolved": "https://registry.npmmirror.com/stream-http/-/stream-http-2.8.3.tgz", + "integrity": "sha512-+TSkfINHDo4J+ZobQLWiMouQYB+UVYFttRA94FpEzzJ7ZdqcL4uUUQ7WkdkI4DSozGmgBUE/a47L+38PenXhUw==", + "peer": true, + "dependencies": { + "builtin-status-codes": "^3.0.0", + "inherits": "^2.0.1", + "readable-stream": "^2.3.6", + "to-arraybuffer": "^1.0.0", + "xtend": "^4.0.0" + } + }, + "node_modules/stream-shift": { + "version": "1.0.3", + "resolved": "https://registry.npmmirror.com/stream-shift/-/stream-shift-1.0.3.tgz", + "integrity": "sha512-76ORR0DO1o1hlKwTbi/DM3EXWGf3ZJYO8cXX5RJwnul2DEg2oyoZyjLNoQM8WsvZiFKCRfC1O0J7iCvie3RZmQ==", + "peer": true + }, + "node_modules/strict-uri-encode": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz", + "integrity": "sha512-R3f198pcvnB+5IpnBlRkphuE9n46WyVl8I39W/ZUTZLz4nqSP/oLYUrcnJrw462Ds8he4YKMov2efsTIw1BDGQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmmirror.com/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/string_decoder/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmmirror.com/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/string-length": { + "version": "4.0.2", + "resolved": "https://registry.npmmirror.com/string-length/-/string-length-4.0.2.tgz", + "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", + "dev": true, + "dependencies": { + "char-regex": "^1.0.2", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmmirror.com/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + 
"emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmmirror.com/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/strip-bom/-/strip-bom-4.0.0.tgz", + "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-dirs": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/strip-dirs/-/strip-dirs-2.1.0.tgz", + "integrity": "sha512-JOCxOeKLm2CAS73y/U4ZeZPTkE+gNVCzKt7Eox84Iej1LT/2pTWYpZKJuxwQpvX1LiZb1xokNR7RLfuBAa7T3g==", + "dependencies": { + "is-natural-number": "^4.0.1" + } + }, + "node_modules/strip-eof": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/strip-eof/-/strip-eof-1.0.0.tgz", + "integrity": "sha512-7FCwGGmx8mD5xQd3RPUvnSpUXHM3BWuzjtpD4TXsfcZ9EL4azvVVUscFYwD9nx8Kh+uCBC00XBtAykoMHwTh8Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-indent": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/strip-indent/-/strip-indent-2.0.0.tgz", + "integrity": "sha512-RsSNPLpq6YUL7QYy44RnPVTn/lcVZtb48Uof3X5JLbF4zD/Gs7ZFDv2HWol+leoQN2mT86LAzSshGfkTlSOpsA==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmmirror.com/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/strip-outer": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/strip-outer/-/strip-outer-1.0.1.tgz", + "integrity": "sha512-k55yxKHwaXnpYGsOzg4Vl8+tDrWylxDEpknGjhTiZB8dFRU5rTo9CAzeycivxV3s+zlTKwrs6WxMxR95n26kwg==", + "dependencies": { + "escape-string-regexp": "^1.0.2" + }, 
+ "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stylehacks": { + "version": "5.1.1", + "resolved": "https://registry.npmmirror.com/stylehacks/-/stylehacks-5.1.1.tgz", + "integrity": "sha512-sBpcd5Hx7G6seo7b1LkpttvTz7ikD0LlH5RmdcBNb6fFR0Fl7LQwHDFr300q4cwUqi+IYrFGmsIHieMBfnN/Bw==", + "dev": true, + "dependencies": { + "browserslist": "^4.21.4", + "postcss-selector-parser": "^6.0.4" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/stylis": { + "version": "4.3.2", + "resolved": "https://registry.npmmirror.com/stylis/-/stylis-4.3.2.tgz", + "integrity": "sha512-bhtUjWd/z6ltJiQwg0dUfxEJ+W+jdqQd8TbWLWyeIJHlnsqmGLRFFd8e5mA0AZi/zx90smXRlN66YMTcaSFifg==" + }, + "node_modules/stylus": { + "version": "0.55.0", + "resolved": "https://registry.npmmirror.com/stylus/-/stylus-0.55.0.tgz", + "integrity": "sha512-MuzIIVRSbc8XxHH7FjkvWqkIcr1BvoMZoR/oFuAJDlh7VSaNJzrB4uJ38GRQa+mWjLXODAMzeDe0xi9GYbGwnw==", + "dev": true, + "dependencies": { + "css": "^3.0.0", + "debug": "~3.1.0", + "glob": "^7.1.6", + "mkdirp": "~1.0.4", + "safer-buffer": "^2.1.2", + "sax": "~1.2.4", + "semver": "^6.3.0", + "source-map": "^0.7.3" + }, + "bin": { + "stylus": "bin/stylus" + }, + "engines": { + "node": "*" + } + }, + "node_modules/stylus-loader": { + "version": "6.2.0", + "resolved": "https://registry.npmmirror.com/stylus-loader/-/stylus-loader-6.2.0.tgz", + "integrity": "sha512-5dsDc7qVQGRoc6pvCL20eYgRUxepZ9FpeK28XhdXaIPP6kXr6nI1zAAKFQgP5OBkOfKaURp4WUpJzspg1f01Gg==", + "dev": true, + "dependencies": { + "fast-glob": "^3.2.7", + "klona": "^2.0.4", + "normalize-path": "^3.0.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "stylus": ">=0.52.4", + "webpack": "^5.0.0" + } + }, + "node_modules/stylus/node_modules/debug": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/debug/-/debug-3.1.0.tgz", + "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "dev": true, + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/stylus/node_modules/mkdirp": { + "version": "1.0.4", + "resolved": "https://registry.npmmirror.com/mkdirp/-/mkdirp-1.0.4.tgz", + "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", + "dev": true, + "bin": { + "mkdirp": "bin/cmd.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/stylus/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "dev": true + }, + "node_modules/stylus/node_modules/source-map": { + "version": "0.7.4", + "resolved": "https://registry.npmmirror.com/source-map/-/source-map-0.7.4.tgz", + "integrity": "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/subscriptions-transport-ws": { + "version": "0.11.0", + "resolved": "https://registry.npmmirror.com/subscriptions-transport-ws/-/subscriptions-transport-ws-0.11.0.tgz", + "integrity": "sha512-8D4C6DIH5tGiAIpp5I0wD/xRlNiZAPGHygzCe7VzyzUoxHtawzjNAY9SUTXU05/EY2NMY9/9GF0ycizkXr1CWQ==", + "deprecated": "The `subscriptions-transport-ws` package is no longer maintained. 
We recommend you use `graphql-ws` instead. For help migrating Apollo software to `graphql-ws`, see https://www.apollographql.com/docs/apollo-server/data/subscriptions/#switching-from-subscriptions-transport-ws For general help using `graphql-ws`, see https://github.com/enisdenjo/graphql-ws/blob/master/README.md", + "dependencies": { + "backo2": "^1.0.2", + "eventemitter3": "^3.1.0", + "iterall": "^1.2.1", + "symbol-observable": "^1.0.4", + "ws": "^5.2.0 || ^6.0.0 || ^7.0.0" + }, + "peerDependencies": { + "graphql": "^15.7.2 || ^16.0.0" + } + }, + "node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/supports-hyperlinks": { + "version": "2.3.0", + "resolved": "https://registry.npmmirror.com/supports-hyperlinks/-/supports-hyperlinks-2.3.0.tgz", + "integrity": "sha512-RpsAZlpWcDwOPQA22aCH4J0t7L8JmAvsCxfOSEwm7cQs3LshN36QaTkwd70DnBOXDWGssw2eUoc8CaRWT0XunA==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0", + "supports-color": "^7.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-hyperlinks/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-hyperlinks/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/svg.draggable.js": { + "version": "2.2.2", + "resolved": "https://registry.npmmirror.com/svg.draggable.js/-/svg.draggable.js-2.2.2.tgz", + "integrity": "sha512-JzNHBc2fLQMzYCZ90KZHN2ohXL0BQJGQimK1kGk6AvSeibuKcIdDX9Kr0dT9+UJ5O8nYA0RB839Lhvk4CY4MZw==", + "dependencies": { + "svg.js": "^2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/svg.easing.js": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/svg.easing.js/-/svg.easing.js-2.0.0.tgz", + "integrity": "sha512-//ctPdJMGy22YoYGV+3HEfHbm6/69LJUTAqI2/5qBvaNHZ9uUFVC82B0Pl299HzgH13rKrBgi4+XyXXyVWWthA==", + "dependencies": { + "svg.js": ">=2.3.x" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/svg.filter.js": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/svg.filter.js/-/svg.filter.js-2.0.2.tgz", + "integrity": "sha512-xkGBwU+dKBzqg5PtilaTb0EYPqPfJ9Q6saVldX+5vCRy31P6TlRCP3U9NxH3HEufkKkpNgdTLBJnmhDHeTqAkw==", + "dependencies": { + "svg.js": "^2.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/svg.js": { + "version": 
"2.7.1", + "resolved": "https://registry.npmmirror.com/svg.js/-/svg.js-2.7.1.tgz", + "integrity": "sha512-ycbxpizEQktk3FYvn/8BH+6/EuWXg7ZpQREJvgacqn46gIddG24tNNe4Son6omdXCnSOaApnpZw6MPCBA1dODA==" + }, + "node_modules/svg.pathmorphing.js": { + "version": "0.1.3", + "resolved": "https://registry.npmmirror.com/svg.pathmorphing.js/-/svg.pathmorphing.js-0.1.3.tgz", + "integrity": "sha512-49HWI9X4XQR/JG1qXkSDV8xViuTLIWm/B/7YuQELV5KMOPtXjiwH4XPJvr/ghEDibmLQ9Oc22dpWpG0vUDDNww==", + "dependencies": { + "svg.js": "^2.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/svg.resize.js": { + "version": "1.4.3", + "resolved": "https://registry.npmmirror.com/svg.resize.js/-/svg.resize.js-1.4.3.tgz", + "integrity": "sha512-9k5sXJuPKp+mVzXNvxz7U0uC9oVMQrrf7cFsETznzUDDm0x8+77dtZkWdMfRlmbkEEYvUn9btKuZ3n41oNA+uw==", + "dependencies": { + "svg.js": "^2.6.5", + "svg.select.js": "^2.1.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/svg.resize.js/node_modules/svg.select.js": { + "version": "2.1.2", + "resolved": "https://registry.npmmirror.com/svg.select.js/-/svg.select.js-2.1.2.tgz", + "integrity": "sha512-tH6ABEyJsAOVAhwcCjF8mw4crjXSI1aa7j2VQR8ZuJ37H2MBUbyeqYr5nEO7sSN3cy9AR9DUwNg0t/962HlDbQ==", + "dependencies": { + "svg.js": "^2.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/svg.select.js": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/svg.select.js/-/svg.select.js-3.0.1.tgz", + "integrity": "sha512-h5IS/hKkuVCbKSieR9uQCj9w+zLHoPh+ce19bBYyqF53g6mnPB8sAtIbe1s9dh2S2fCmYX2xel1Ln3PJBbK4kw==", + "dependencies": { + "svg.js": "^2.6.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/svgo": { + "version": "2.8.0", + "resolved": "https://registry.npmmirror.com/svgo/-/svgo-2.8.0.tgz", + "integrity": "sha512-+N/Q9kV1+F+UeWYoSiULYo4xYSDQlTgb+ayMobAXPwMnLvop7oxKMo9OzIrX5x3eS4L4f2UHhc9axXwY8DpChg==", + "dev": true, + "dependencies": { + "@trysound/sax": "0.2.0", + "commander": "^7.2.0", + "css-select": "^4.1.3", + "css-tree": "^1.1.3", + "csso": "^4.2.0", + "picocolors": "^1.0.0", + "stable": "^0.1.8" + }, + "bin": { + "svgo": "bin/svgo" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/svgo/node_modules/css-tree": { + "version": "1.1.3", + "resolved": "https://registry.npmmirror.com/css-tree/-/css-tree-1.1.3.tgz", + "integrity": "sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q==", + "dev": true, + "dependencies": { + "mdn-data": "2.0.14", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/svgo/node_modules/mdn-data": { + "version": "2.0.14", + "resolved": "https://registry.npmmirror.com/mdn-data/-/mdn-data-2.0.14.tgz", + "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==", + "dev": true + }, + "node_modules/symbol-observable": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/symbol-observable/-/symbol-observable-1.2.0.tgz", + "integrity": "sha512-e900nM8RRtGhlV36KGEU9k65K3mPb1WV70OdjfxlG2EAuM1noi/E/BaW/uMhL7bPEssK8QV57vN3esixjUvcXQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/symbol-tree": { + "version": "3.2.4", + "resolved": "https://registry.npmmirror.com/symbol-tree/-/symbol-tree-3.2.4.tgz", + "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==", + "dev": true + }, + "node_modules/table": { + "version": "6.8.2", + "resolved": 
"https://registry.npmmirror.com/table/-/table-6.8.2.tgz", + "integrity": "sha512-w2sfv80nrAh2VCbqR5AK27wswXhqcck2AhfnNW76beQXskGZ1V12GwS//yYVa3d3fcvAip2OUnbDAjW2k3v9fA==", + "dev": true, + "dependencies": { + "ajv": "^8.0.1", + "lodash.truncate": "^4.4.2", + "slice-ansi": "^4.0.0", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/table/node_modules/ajv": { + "version": "8.13.0", + "resolved": "https://registry.npmmirror.com/ajv/-/ajv-8.13.0.tgz", + "integrity": "sha512-PRA911Blj99jR5RMeTunVbNXMF6Lp4vZXnk5GQjcnUWUTsrXtekg/pnmFFI2u/I36Y/2bITGS30GZCXei6uNkA==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.3", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.4.1" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/table/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "dev": true + }, + "node_modules/tapable": { + "version": "1.1.3", + "resolved": "https://registry.npmmirror.com/tapable/-/tapable-1.1.3.tgz", + "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/tar": { + "version": "6.2.1", + "resolved": "https://registry.npmmirror.com/tar/-/tar-6.2.1.tgz", + "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", + "optional": true, + "dependencies": { + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^5.0.0", + "minizlib": "^2.1.1", + "mkdirp": "^1.0.3", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/tar-stream": { + "version": "1.6.2", + "resolved": "https://registry.npmmirror.com/tar-stream/-/tar-stream-1.6.2.tgz", + "integrity": "sha512-rzS0heiNf8Xn7/mpdSVVSMAWAoy9bfb1WOTYC78Z0UQKeKa/CWS8FOq0lKGNa8DWKAn9gxjCvMLYc5PGXYlK2A==", + "dependencies": { + "bl": "^1.0.0", + "buffer-alloc": "^1.2.0", + "end-of-stream": "^1.0.0", + "fs-constants": "^1.0.0", + "readable-stream": "^2.3.0", + "to-buffer": "^1.1.1", + "xtend": "^4.0.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/tar-stream/node_modules/bl": { + "version": "1.2.3", + "resolved": "https://registry.npmmirror.com/bl/-/bl-1.2.3.tgz", + "integrity": "sha512-pvcNpa0UU69UT341rO6AYy4FVAIkUHuZXRIWbq+zHnsVcRzDDjIAhGuuYoi0d//cwIwtt4pkpKycWEfjdV+vww==", + "dependencies": { + "readable-stream": "^2.3.5", + "safe-buffer": "^5.1.1" + } + }, + "node_modules/tar/node_modules/chownr": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/chownr/-/chownr-2.0.0.tgz", + "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==", + "optional": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/tar/node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/minipass/-/minipass-5.0.0.tgz", + "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "optional": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/tar/node_modules/mkdirp": { + "version": "1.0.4", + "resolved": "https://registry.npmmirror.com/mkdirp/-/mkdirp-1.0.4.tgz", + 
"integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", + "optional": true, + "bin": { + "mkdirp": "bin/cmd.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/tar/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "optional": true + }, + "node_modules/taskkill": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/taskkill/-/taskkill-3.1.0.tgz", + "integrity": "sha512-5KcOFzPvd1nGFVrmB7H4+QAWVjYOf//+QTbOj0GpXbqtqbKGWVczG+rq6VhXAtdtlKLTs16NAmHRyF5vbggQ2w==", + "dependencies": { + "arrify": "^2.0.1", + "execa": "^3.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/taskkill/node_modules/execa": { + "version": "3.4.0", + "resolved": "https://registry.npmmirror.com/execa/-/execa-3.4.0.tgz", + "integrity": "sha512-r9vdGQk4bmCuK1yKQu1KTwcT2zwfWdbdaXfCtAh+5nU/4fSX+JAb7vZGvI5naJrQlvONrEB20jeruESI69530g==", + "dependencies": { + "cross-spawn": "^7.0.0", + "get-stream": "^5.0.0", + "human-signals": "^1.1.1", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.0", + "onetime": "^5.1.0", + "p-finally": "^2.0.0", + "signal-exit": "^3.0.2", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": "^8.12.0 || >=9.7.0" + } + }, + "node_modules/taskkill/node_modules/get-stream": { + "version": "5.2.0", + "resolved": "https://registry.npmmirror.com/get-stream/-/get-stream-5.2.0.tgz", + "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", + "dependencies": { + "pump": "^3.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/taskkill/node_modules/human-signals": { + "version": "1.1.1", + "resolved": "https://registry.npmmirror.com/human-signals/-/human-signals-1.1.1.tgz", + "integrity": "sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw==", + "engines": { + "node": ">=8.12.0" + } + }, + "node_modules/taskkill/node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/taskkill/node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmmirror.com/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/taskkill/node_modules/p-finally": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/p-finally/-/p-finally-2.0.1.tgz", + "integrity": "sha512-vpm09aKwq6H9phqRQzecoDpD8TmVyGw70qmWlyq5onxY7tqyTTFVvxMykxQSQKILBSFlbXpypIw2T1Ml7+DDtw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/temp": { + "version": "0.8.4", + "resolved": "https://registry.npmmirror.com/temp/-/temp-0.8.4.tgz", + "integrity": "sha512-s0ZZzd0BzYv5tLSptZooSjK8oj6C+c19p7Vqta9+6NPOf7r+fxq0cJe6/oN4LTC79sy5NY8ucOJNgwsKCSbfqg==", + "dependencies": { + "rimraf": "~2.6.2" + }, + "engines": { + 
"node": ">=6.0.0" + } + }, + "node_modules/temp/node_modules/rimraf": { + "version": "2.6.3", + "resolved": "https://registry.npmmirror.com/rimraf/-/rimraf-2.6.3.tgz", + "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + } + }, + "node_modules/terminal-link": { + "version": "2.1.1", + "resolved": "https://registry.npmmirror.com/terminal-link/-/terminal-link-2.1.1.tgz", + "integrity": "sha512-un0FmiRUQNr5PJqy9kP7c40F5BOfpGlYTrxonDChEZB7pzZxRNp/bt+ymiy9/npwXya9KH99nJ/GXFIiUkYGFQ==", + "dev": true, + "dependencies": { + "ansi-escapes": "^4.2.1", + "supports-hyperlinks": "^2.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/terser": { + "version": "5.31.0", + "resolved": "https://registry.npmmirror.com/terser/-/terser-5.31.0.tgz", + "integrity": "sha512-Q1JFAoUKE5IMfI4Z/lkE/E6+SwgzO+x4tq4v1AyBLRj8VSYvRO6A/rQrPg1yud4g0En9EKI1TvFRF2tQFcoUkg==", + "dependencies": { + "@jridgewell/source-map": "^0.3.3", + "acorn": "^8.8.2", + "commander": "^2.20.0", + "source-map-support": "~0.5.20" + }, + "bin": { + "terser": "bin/terser" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/terser-webpack-plugin": { + "version": "5.3.10", + "resolved": "https://registry.npmmirror.com/terser-webpack-plugin/-/terser-webpack-plugin-5.3.10.tgz", + "integrity": "sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w==", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.20", + "jest-worker": "^27.4.5", + "schema-utils": "^3.1.1", + "serialize-javascript": "^6.0.1", + "terser": "^5.26.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "uglify-js": { + "optional": true + } + } + }, + "node_modules/terser-webpack-plugin/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmmirror.com/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/terser/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmmirror.com/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" + }, + "node_modules/test-exclude": { + "version": "6.0.0", + "resolved": "https://registry.npmmirror.com/test-exclude/-/test-exclude-6.0.0.tgz", + "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", + "dev": true, + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmmirror.com/text-table/-/text-table-0.2.0.tgz", + "integrity": 
"sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true + }, + "node_modules/thenify": { + "version": "3.3.1", + "resolved": "https://registry.npmmirror.com/thenify/-/thenify-3.3.1.tgz", + "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", + "dev": true, + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "resolved": "https://registry.npmmirror.com/thenify-all/-/thenify-all-1.6.0.tgz", + "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", + "dev": true, + "dependencies": { + "thenify": ">= 3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/thread-loader": { + "version": "3.0.4", + "resolved": "https://registry.npmmirror.com/thread-loader/-/thread-loader-3.0.4.tgz", + "integrity": "sha512-ByaL2TPb+m6yArpqQUZvP+5S1mZtXsEP7nWKKlAUTm7fCml8kB5s1uI3+eHRP2bk5mVYfRSBI7FFf+tWEyLZwA==", + "dev": true, + "dependencies": { + "json-parse-better-errors": "^1.0.2", + "loader-runner": "^4.1.0", + "loader-utils": "^2.0.0", + "neo-async": "^2.6.2", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.27.0 || ^5.0.0" + } + }, + "node_modules/thread-loader/node_modules/loader-utils": { + "version": "2.0.4", + "resolved": "https://registry.npmmirror.com/loader-utils/-/loader-utils-2.0.4.tgz", + "integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==", + "dev": true, + "dependencies": { + "big.js": "^5.2.2", + "emojis-list": "^3.0.0", + "json5": "^2.1.2" + }, + "engines": { + "node": ">=8.9.0" + } + }, + "node_modules/thread-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmmirror.com/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/throat": { + "version": "6.0.2", + "resolved": "https://registry.npmmirror.com/throat/-/throat-6.0.2.tgz", + "integrity": "sha512-WKexMoJj3vEuK0yFEapj8y64V0A6xcuPuK9Gt1d0R+dzCSJc0lHqQytAbSB4cDAK0dWh4T0E2ETkoLE2WZ41OQ==", + "dev": true + }, + "node_modules/throttle-debounce": { + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/throttle-debounce/-/throttle-debounce-5.0.0.tgz", + "integrity": "sha512-2iQTSgkkc1Zyk0MeVrt/3BvuOXYPl/R8Z0U2xxo9rjwNciaHDG3R+Lm6dh4EeUci49DanvBnuqI6jshoQQRGEg==", + "engines": { + "node": ">=12.22" + } + }, + "node_modules/through": { + "version": "2.3.8", + "resolved": "https://registry.npmmirror.com/through/-/through-2.3.8.tgz", + "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==" + }, + "node_modules/through2": { + "version": "2.0.5", + "resolved": "https://registry.npmmirror.com/through2/-/through2-2.0.5.tgz", + "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", + "peer": true, + "dependencies": { + "readable-stream": "~2.3.6", + "xtend": 
"~4.0.1" + } + }, + "node_modules/thunky": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/thunky/-/thunky-1.1.0.tgz", + "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==", + "dev": true + }, + "node_modules/timed-out": { + "version": "4.0.1", + "resolved": "https://registry.npmmirror.com/timed-out/-/timed-out-4.0.1.tgz", + "integrity": "sha512-G7r3AhovYtr5YKOWQkta8RKAPb+J9IsO4uVmzjl8AZwfhs8UcUwTiD6gcJYSgOtzyjvQKrKYn41syHbUWMkafA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/timers-browserify": { + "version": "2.0.12", + "resolved": "https://registry.npmmirror.com/timers-browserify/-/timers-browserify-2.0.12.tgz", + "integrity": "sha512-9phl76Cqm6FhSX9Xe1ZUAMLtm1BLkKj2Qd5ApyWkXzsMRaA7dgr81kf4wJmQf/hAvg8EEyJxDo3du/0KlhPiKQ==", + "peer": true, + "dependencies": { + "setimmediate": "^1.0.4" + }, + "engines": { + "node": ">=0.6.0" + } + }, + "node_modules/tmp": { + "version": "0.0.33", + "resolved": "https://registry.npmmirror.com/tmp/-/tmp-0.0.33.tgz", + "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", + "dependencies": { + "os-tmpdir": "~1.0.2" + }, + "engines": { + "node": ">=0.6.0" + } + }, + "node_modules/tmpl": { + "version": "1.0.5", + "resolved": "https://registry.npmmirror.com/tmpl/-/tmpl-1.0.5.tgz", + "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", + "dev": true + }, + "node_modules/to-arraybuffer": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz", + "integrity": "sha512-okFlQcoGTi4LQBG/PgSYblw9VOyptsz2KJZqc6qtgGdes8VktzUQkj4BI2blit072iS8VODNcMA+tvnS9dnuMA==", + "peer": true + }, + "node_modules/to-buffer": { + "version": "1.1.1", + "resolved": "https://registry.npmmirror.com/to-buffer/-/to-buffer-1.1.1.tgz", + "integrity": "sha512-lx9B5iv7msuFYE3dytT+KE5tap+rNYw+K4jVkb9R/asAb+pbBSM17jtunHplhBe6RRJdZx3Pn2Jph24O32mOVg==" + }, + "node_modules/to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", + "engines": { + "node": ">=4" + } + }, + "node_modules/to-object-path": { + "version": "0.3.0", + "resolved": "https://registry.npmmirror.com/to-object-path/-/to-object-path-0.3.0.tgz", + "integrity": "sha512-9mWHdnGRuh3onocaHzukyvCZhzvr6tiflAy/JRFXcJX0TjgfWA9pk9t8CMbzmBE4Jfw58pXbkngtBtqYxzNEyg==", + "dependencies": { + "kind-of": "^3.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/to-regex": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/to-regex/-/to-regex-3.0.2.tgz", + "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==", + "dependencies": { + "define-property": "^2.0.2", + "extend-shallow": "^3.0.2", + "regex-not": "^1.0.2", + "safe-regex": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmmirror.com/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/to-regex/node_modules/extend-shallow": { + 
"version": "3.0.2", + "resolved": "https://registry.npmmirror.com/extend-shallow/-/extend-shallow-3.0.2.tgz", + "integrity": "sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==", + "dependencies": { + "assign-symbols": "^1.0.0", + "is-extendable": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/to-regex/node_modules/is-extendable": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/is-extendable/-/is-extendable-1.0.1.tgz", + "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "dependencies": { + "is-plain-object": "^2.0.4" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/totalist": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/totalist/-/totalist-3.0.1.tgz", + "integrity": "sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/tough-cookie": { + "version": "4.1.4", + "resolved": "https://registry.npmmirror.com/tough-cookie/-/tough-cookie-4.1.4.tgz", + "integrity": "sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==", + "dev": true, + "dependencies": { + "psl": "^1.1.33", + "punycode": "^2.1.1", + "universalify": "^0.2.0", + "url-parse": "^1.5.3" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/tough-cookie/node_modules/universalify": { + "version": "0.2.0", + "resolved": "https://registry.npmmirror.com/universalify/-/universalify-0.2.0.tgz", + "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==", + "dev": true, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/tr46": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/tr46/-/tr46-2.1.0.tgz", + "integrity": "sha512-15Ih7phfcdP5YxqiB+iDtLoaTz4Nd35+IiAv0kQ5FNKHzXgdWqPoTIqEDDJmXceQt4JZk6lVPT8lnDlPpGDppw==", + "dev": true, + "dependencies": { + "punycode": "^2.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/trim-repeated": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/trim-repeated/-/trim-repeated-1.0.0.tgz", + "integrity": "sha512-pkonvlKk8/ZuR0D5tLW8ljt5I8kmxp2XKymhepUeOdCEfKpZaktSArkLHZt76OB1ZvO9bssUsDty4SWhLvZpLg==", + "dependencies": { + "escape-string-regexp": "^1.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ts-jest": { + "version": "27.1.5", + "resolved": "https://registry.npmmirror.com/ts-jest/-/ts-jest-27.1.5.tgz", + "integrity": "sha512-Xv6jBQPoBEvBq/5i2TeSG9tt/nqkbpcurrEG1b+2yfBrcJelOZF9Ml6dmyMh7bcW9JyFbRYpR5rxROSlBLTZHA==", + "dev": true, + "dependencies": { + "bs-logger": "0.x", + "fast-json-stable-stringify": "2.x", + "jest-util": "^27.0.0", + "json5": "2.x", + "lodash.memoize": "4.x", + "make-error": "1.x", + "semver": "7.x", + "yargs-parser": "20.x" + }, + "bin": { + "ts-jest": "cli.js" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + }, + "peerDependencies": { + "@babel/core": ">=7.0.0-beta.0 <8", + "@types/jest": "^27.0.0", + "babel-jest": ">=27.0.0 <28", + "jest": "^27.0.0", + "typescript": 
">=3.8 <5.0" + }, + "peerDependenciesMeta": { + "@babel/core": { + "optional": true + }, + "@types/jest": { + "optional": true + }, + "babel-jest": { + "optional": true + }, + "esbuild": { + "optional": true + } + } + }, + "node_modules/ts-jest/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/ts-loader": { + "version": "9.5.1", + "resolved": "https://registry.npmmirror.com/ts-loader/-/ts-loader-9.5.1.tgz", + "integrity": "sha512-rNH3sK9kGZcH9dYzC7CewQm4NtxJTjSEVRJ2DyBZR7f8/wcta+iV44UPCXc5+nzDzivKtlzV6c9P4e+oFhDLYg==", + "dev": true, + "dependencies": { + "chalk": "^4.1.0", + "enhanced-resolve": "^5.0.0", + "micromatch": "^4.0.0", + "semver": "^7.3.4", + "source-map": "^0.7.4" + }, + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "typescript": "*", + "webpack": "^5.0.0" + } + }, + "node_modules/ts-loader/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/ts-loader/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/ts-loader/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/ts-loader/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/ts-loader/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/ts-loader/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/ts-loader/node_modules/source-map": { + "version": "0.7.4", + "resolved": "https://registry.npmmirror.com/source-map/-/source-map-0.7.4.tgz", 
+ "integrity": "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/ts-loader/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/tsconfig": { + "version": "7.0.0", + "resolved": "https://registry.npmmirror.com/tsconfig/-/tsconfig-7.0.0.tgz", + "integrity": "sha512-vZXmzPrL+EmC4T/4rVlT2jNVMWCi/O4DIiSj3UHg1OE5kCKbk4mfrXc6dZksLgRM/TZlKnousKH9bbTazUWRRw==", + "dev": true, + "dependencies": { + "@types/strip-bom": "^3.0.0", + "@types/strip-json-comments": "0.0.30", + "strip-bom": "^3.0.0", + "strip-json-comments": "^2.0.0" + } + }, + "node_modules/tsconfig/node_modules/strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/tsconfig/node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/tslib": { + "version": "2.6.2", + "resolved": "https://registry.npmmirror.com/tslib/-/tslib-2.6.2.tgz", + "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" + }, + "node_modules/tsutils": { + "version": "3.21.0", + "resolved": "https://registry.npmmirror.com/tsutils/-/tsutils-3.21.0.tgz", + "integrity": "sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==", + "dev": true, + "dependencies": { + "tslib": "^1.8.1" + }, + "engines": { + "node": ">= 6" + }, + "peerDependencies": { + "typescript": ">=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta" + } + }, + "node_modules/tsutils/node_modules/tslib": { + "version": "1.14.1", + "resolved": "https://registry.npmmirror.com/tslib/-/tslib-1.14.1.tgz", + "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", + "dev": true + }, + "node_modules/tty-browserify": { + "version": "0.0.0", + "resolved": "https://registry.npmmirror.com/tty-browserify/-/tty-browserify-0.0.0.tgz", + "integrity": "sha512-JVa5ijo+j/sOoHGjw0sxw734b1LhBkQ3bvUGNdxnVXDCX81Yx7TFgnZygxrIIWn23hbfTaMYLwRmAxFyDuFmIw==", + "peer": true + }, + "node_modules/tunnel-agent": { + "version": "0.6.0", + "resolved": "https://registry.npmmirror.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz", + "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", + "dependencies": { + "safe-buffer": "^5.0.1" + }, + "engines": { + "node": "*" + } + }, + "node_modules/type": { + "version": "2.7.2", + "resolved": "https://registry.npmmirror.com/type/-/type-2.7.2.tgz", + "integrity": "sha512-dzlvlNlt6AXU7EBSfpAscydQ7gXB+pPGsPnfJnZpiNJBDj7IaJzQlBZYGdEi4R9HmPdBv2XmWJ6YUtoTa7lmCw==" 
+ }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmmirror.com/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-detect": { + "version": "4.0.8", + "resolved": "https://registry.npmmirror.com/type-detect/-/type-detect-4.0.8.tgz", + "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmmirror.com/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmmirror.com/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/typedarray": { + "version": "0.0.6", + "resolved": "https://registry.npmmirror.com/typedarray/-/typedarray-0.0.6.tgz", + "integrity": "sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA==", + "peer": true + }, + "node_modules/typedarray-to-buffer": { + "version": "3.1.5", + "resolved": "https://registry.npmmirror.com/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", + "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", + "dependencies": { + "is-typedarray": "^1.0.0" + } + }, + "node_modules/typescript": { + "version": "4.5.5", + "resolved": "https://registry.npmmirror.com/typescript/-/typescript-4.5.5.tgz", + "integrity": "sha512-TCTIul70LyWe6IJWT8QSYeA54WQe8EjQFU4wY52Fasj5UKx88LNYKCgBEHcOMOrFF1rKGbD8v/xcNWVUq9SymA==", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=4.2.0" + } + }, + "node_modules/unbzip2-stream": { + "version": "1.4.3", + "resolved": "https://registry.npmmirror.com/unbzip2-stream/-/unbzip2-stream-1.4.3.tgz", + "integrity": "sha512-mlExGW4w71ebDJviH16lQLtZS32VKqsSfk80GCfUlwT/4/hNRFsoscrF/c++9xinkMzECL1uL9DDwXqFWkruPg==", + "dependencies": { + "buffer": "^5.2.1", + "through": "^2.3.8" + } + }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmmirror.com/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, + "node_modules/unicode-canonical-property-names-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", + "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", + 
"integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", + "dependencies": { + "unicode-canonical-property-names-ecmascript": "^2.0.0", + "unicode-property-aliases-ecmascript": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-value-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz", + "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-property-aliases-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", + "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==", + "engines": { + "node": ">=4" + } + }, + "node_modules/union-value": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/union-value/-/union-value-1.0.1.tgz", + "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==", + "dependencies": { + "arr-union": "^3.1.0", + "get-value": "^2.0.6", + "is-extendable": "^0.1.1", + "set-value": "^2.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/unique-filename": { + "version": "1.1.1", + "resolved": "https://registry.npmmirror.com/unique-filename/-/unique-filename-1.1.1.tgz", + "integrity": "sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ==", + "peer": true, + "dependencies": { + "unique-slug": "^2.0.0" + } + }, + "node_modules/unique-slug": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/unique-slug/-/unique-slug-2.0.2.tgz", + "integrity": "sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w==", + "peer": true, + "dependencies": { + "imurmurhash": "^0.1.4" + } + }, + "node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/unset-value": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/unset-value/-/unset-value-1.0.0.tgz", + "integrity": "sha512-PcA2tsuGSF9cnySLHTLSh2qrQiJ70mn+r+Glzxv2TWZblxsxCC52BDlZoPCsz7STd9pN7EZetkWZBAvk4cgZdQ==", + "dependencies": { + "has-value": "^0.3.1", + "isobject": "^3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/unset-value/node_modules/has-value": { + "version": "0.3.1", + "resolved": "https://registry.npmmirror.com/has-value/-/has-value-0.3.1.tgz", + "integrity": "sha512-gpG936j8/MzaeID5Yif+577c17TxaDmhuyVgSwtnL/q8UUTySg8Mecb+8Cf1otgLoD7DDH75axp86ER7LFsf3Q==", + "dependencies": { + "get-value": "^2.0.3", + "has-values": "^0.1.4", + "isobject": "^2.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/unset-value/node_modules/has-value/node_modules/isobject": { + 
"version": "2.1.0", + "resolved": "https://registry.npmmirror.com/isobject/-/isobject-2.1.0.tgz", + "integrity": "sha512-+OUdGJlgjOBZDfxnDjYYG6zp487z0JGNQq3cYQYg5f5hKR+syHMsaztzGeml/4kGG55CSpKSpWTY+jYGgsHLgA==", + "dependencies": { + "isarray": "1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/unset-value/node_modules/has-values": { + "version": "0.1.4", + "resolved": "https://registry.npmmirror.com/has-values/-/has-values-0.1.4.tgz", + "integrity": "sha512-J8S0cEdWuQbqD9//tlZxiMuMNmxB8PlEwvYwuxsTmR1G5RXUePEX/SJn7aD0GMLieuZYSwNH0cQuJGwnYunXRQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/upath": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/upath/-/upath-1.2.0.tgz", + "integrity": "sha512-aZwGpamFO61g3OlfT7OQCHqhGnW43ieH9WZeP7QxN/G/jS4jfqUkZxoryvJgVPEcrl5NL/ggHsSmLMHuH64Lhg==", + "optional": true, + "peer": true, + "engines": { + "node": ">=4", + "yarn": "*" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.0.16", + "resolved": "https://registry.npmmirror.com/update-browserslist-db/-/update-browserslist-db-1.0.16.tgz", + "integrity": "sha512-KVbTxlBYlckhF5wgfyZXTWnMn7MMZjMu9XG8bPlliUOP9ThaF4QnhP8qrjrH7DRzHfSk0oQv1wToW+iA5GajEQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "escalade": "^3.1.2", + "picocolors": "^1.0.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmmirror.com/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/urix": { + "version": "0.1.0", + "resolved": "https://registry.npmmirror.com/urix/-/urix-0.1.0.tgz", + "integrity": "sha512-Am1ousAhSLBeB9cG/7k7r2R0zj50uDRlZHPGbazid5s9rlF1F/QKYObEKSIunSjIOkJZqwRRLpvewjEkM7pSqg==", + "deprecated": "Please see https://github.com/lydell/urix#deprecated" + }, + "node_modules/url": { + "version": "0.11.3", + "resolved": "https://registry.npmmirror.com/url/-/url-0.11.3.tgz", + "integrity": "sha512-6hxOLGfZASQK/cijlZnZJTq8OXAkt/3YGfQX45vvMYXpZoo8NdWZcY73K108Jf759lS1Bv/8wXnHDTSz17dSRw==", + "peer": true, + "dependencies": { + "punycode": "^1.4.1", + "qs": "^6.11.2" + } + }, + "node_modules/url-parse": { + "version": "1.5.10", + "resolved": "https://registry.npmmirror.com/url-parse/-/url-parse-1.5.10.tgz", + "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", + "dev": true, + "dependencies": { + "querystringify": "^2.1.1", + "requires-port": "^1.0.0" + } + }, + "node_modules/url-parse-lax": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/url-parse-lax/-/url-parse-lax-3.0.0.tgz", + "integrity": "sha512-NjFKA0DidqPa5ciFcSrXnAltTtzz84ogy+NebPvfEgAck0+TNg4UJ4IN+fB7zRZfbgUf0syOo9MDxFkDSMuFaQ==", + "dependencies": { + "prepend-http": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/url-to-options": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/url-to-options/-/url-to-options-1.0.1.tgz", + "integrity": 
"sha512-0kQLIzG4fdk/G5NONku64rSH/x32NOA39LVQqlK8Le6lvTF6GGRJpqaQFGgU+CLwySIqBSMdwYM0sYcW9f6P4A==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/url/node_modules/punycode": { + "version": "1.4.1", + "resolved": "https://registry.npmmirror.com/punycode/-/punycode-1.4.1.tgz", + "integrity": "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ==", + "peer": true + }, + "node_modules/url/node_modules/qs": { + "version": "6.12.1", + "resolved": "https://registry.npmmirror.com/qs/-/qs-6.12.1.tgz", + "integrity": "sha512-zWmv4RSuB9r2mYQw3zxQuHWeU+42aKi1wWig/j4ele4ygELZ7PEO6MM7rim9oAQH2A5MWfsAVf/jPvTPgCbvUQ==", + "peer": true, + "dependencies": { + "side-channel": "^1.0.6" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/use": { + "version": "3.1.1", + "resolved": "https://registry.npmmirror.com/use/-/use-3.1.1.tgz", + "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/utf-8-validate": { + "version": "5.0.10", + "resolved": "https://registry.npmmirror.com/utf-8-validate/-/utf-8-validate-5.0.10.tgz", + "integrity": "sha512-Z6czzLq4u8fPOyx7TU6X3dvUZVvoJmxSQ+IcrlmagKhilxlhZgxPK6C5Jqbkw1IDUmFTM+cz9QDnnLTwDz/2gQ==", + "hasInstallScript": true, + "dependencies": { + "node-gyp-build": "^4.3.0" + }, + "engines": { + "node": ">=6.14.2" + } + }, + "node_modules/util": { + "version": "0.12.5", + "resolved": "https://registry.npmmirror.com/util/-/util-0.12.5.tgz", + "integrity": "sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA==", + "dependencies": { + "inherits": "^2.0.3", + "is-arguments": "^1.0.4", + "is-generator-function": "^1.0.7", + "is-typed-array": "^1.1.3", + "which-typed-array": "^1.1.2" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, + "node_modules/utila": { + "version": "0.4.0", + "resolved": "https://registry.npmmirror.com/utila/-/utila-0.4.0.tgz", + "integrity": "sha512-Z0DbgELS9/L/75wZbro8xAnT50pBVFQZ+hUEueGDU5FN51YSCYM+jdxsfCiHjwNP/4LCDD0i/graKpeBnOXKRA==", + "dev": true + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/uuid": { + "version": "9.0.1", + "resolved": "https://registry.npmmirror.com/uuid/-/uuid-9.0.1.tgz", + "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/v-clipboard": { + "version": "3.0.0-next.1", + "resolved": "https://registry.npmmirror.com/v-clipboard/-/v-clipboard-3.0.0-next.1.tgz", + "integrity": "sha512-UvCnzetQMlVfk9yoiyew8ldGiCzeER5aYdmXXtZp8LC6rt2QXQS0AayEDn1K7rlXpd3M8d+JeYNUV+ZNgtaS4A==", + "dependencies": { + "vue": "^3.2.45" + } + }, + "node_modules/v8-compile-cache": { + "version": "2.4.0", + "resolved": 
"https://registry.npmmirror.com/v8-compile-cache/-/v8-compile-cache-2.4.0.tgz", + "integrity": "sha512-ocyWc3bAHBB/guyqJQVI5o4BZkPhznPYUG2ea80Gond/BgNWpap8TOmLSeeQG7bnh2KMISxskdADG59j7zruhw==", + "dev": true + }, + "node_modules/v8-to-istanbul": { + "version": "8.1.1", + "resolved": "https://registry.npmmirror.com/v8-to-istanbul/-/v8-to-istanbul-8.1.1.tgz", + "integrity": "sha512-FGtKtv3xIpR6BYhvgH8MI/y78oT7d8Au3ww4QIxymrCtZEh5b8gCw2siywE+puhEmuWKDtmfrvF5UlB298ut3w==", + "dev": true, + "dependencies": { + "@types/istanbul-lib-coverage": "^2.0.1", + "convert-source-map": "^1.6.0", + "source-map": "^0.7.3" + }, + "engines": { + "node": ">=10.12.0" + } + }, + "node_modules/v8-to-istanbul/node_modules/convert-source-map": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/convert-source-map/-/convert-source-map-1.9.0.tgz", + "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==", + "dev": true + }, + "node_modules/v8-to-istanbul/node_modules/source-map": { + "version": "0.7.4", + "resolved": "https://registry.npmmirror.com/source-map/-/source-map-0.7.4.tgz", + "integrity": "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/validate-npm-package-license": { + "version": "3.0.4", + "resolved": "https://registry.npmmirror.com/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", + "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", + "dependencies": { + "spdx-correct": "^3.0.0", + "spdx-expression-parse": "^3.0.0" + } + }, + "node_modules/validate-npm-package-name": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/validate-npm-package-name/-/validate-npm-package-name-3.0.0.tgz", + "integrity": "sha512-M6w37eVCMMouJ9V/sdPGnC5H4uDr73/+xdq0FBLO3TFFX1+7wiUY6Es328NN+y43tmY+doUdN9g9J21vqB7iLw==", + "dependencies": { + "builtins": "^1.0.3" + } + }, + "node_modules/value-or-promise": { + "version": "1.0.11", + "resolved": "https://registry.npmmirror.com/value-or-promise/-/value-or-promise-1.0.11.tgz", + "integrity": "sha512-41BrgH+dIbCFXClcSapVs5M6GkENd3gQOJpEfPDNa71LsUGMXDL0jMWpI/Rh7WhX+Aalfz2TTS3Zt5pUsbnhLg==", + "engines": { + "node": ">=12" + } + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmmirror.com/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/vm-browserify": { + "version": "1.1.2", + "resolved": "https://registry.npmmirror.com/vm-browserify/-/vm-browserify-1.1.2.tgz", + "integrity": "sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ==", + "peer": true + }, + "node_modules/vue": { + "version": "3.4.27", + "resolved": "https://registry.npmmirror.com/vue/-/vue-3.4.27.tgz", + "integrity": "sha512-8s/56uK6r01r1icG/aEOHqyMVxd1bkYcSe9j8HcKtr/xTOFWvnzIVTehNW+5Yt89f+DLBe4A569pnZLS5HzAMA==", + "dependencies": { + "@vue/compiler-dom": "3.4.27", + "@vue/compiler-sfc": "3.4.27", + "@vue/runtime-dom": "3.4.27", + "@vue/server-renderer": "3.4.27", + "@vue/shared": "3.4.27" + }, + "peerDependencies": { + "typescript": "*" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/vue-codemod": { + "version": "0.0.5", + "resolved": 
"https://registry.npmmirror.com/vue-codemod/-/vue-codemod-0.0.5.tgz", + "integrity": "sha512-DE+24W1d3oanGqq7yna4ddOKXmVzjECgku2ddMcm7OS9Bp9QOblMHT88PzKiCc7npGiHf5+mTfrEW1JVIBbA2A==", + "dependencies": { + "@babel/core": "^7.10.3", + "@babel/preset-env": "^7.10.3", + "@babel/types": "^7.12.12", + "@types/jscodeshift": "^0.7.1", + "@vue/compiler-core": "^3.0.5", + "@vue/compiler-dom": "^3.0.5", + "debug": "^4.1.1", + "globby": "^11.0.2", + "inquirer": "^7.0.3", + "jscodeshift": "^0.11.0", + "lru-cache": "^6.0.0", + "source-map": "^0.6.1", + "yargs": "^16.2.0" + }, + "bin": { + "vue-codemod": "dist/bin/vue-codemod.js" + }, + "engines": { + "node": ">= 10.0" + } + }, + "node_modules/vue-codemod/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/vue-codemod/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/vue-codemod/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/vue-codemod/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/vue-codemod/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/vue-codemod/node_modules/inquirer": { + "version": "7.3.3", + "resolved": "https://registry.npmmirror.com/inquirer/-/inquirer-7.3.3.tgz", + "integrity": "sha512-JG3eIAj5V9CwcGvuOmoo6LB9kbAYT8HXffUl6memuszlwDC/qvFAJw49XJ5NROSFNPxp3iQg1GqkFhaY/CR0IA==", + "dependencies": { + "ansi-escapes": "^4.2.1", + "chalk": "^4.1.0", + "cli-cursor": "^3.1.0", + "cli-width": "^3.0.0", + "external-editor": "^3.0.3", + "figures": "^3.0.0", + "lodash": "^4.17.19", + "mute-stream": "0.0.8", + "run-async": "^2.4.0", + "rxjs": "^6.6.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0", + "through": "^2.3.6" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/vue-codemod/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmmirror.com/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + 
}, + "node_modules/vue-codemod/node_modules/rxjs": { + "version": "6.6.7", + "resolved": "https://registry.npmmirror.com/rxjs/-/rxjs-6.6.7.tgz", + "integrity": "sha512-hTdwr+7yYNIT5n4AMYp85KA6yw2Va0FLa3Rguvbpa4W3I5xynaBZo41cM3XM+4Q6fRMj3sBYIR1VAmZMXYJvRQ==", + "dependencies": { + "tslib": "^1.9.0" + }, + "engines": { + "npm": ">=2.0.0" + } + }, + "node_modules/vue-codemod/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/vue-codemod/node_modules/tslib": { + "version": "1.14.1", + "resolved": "https://registry.npmmirror.com/tslib/-/tslib-1.14.1.tgz", + "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==" + }, + "node_modules/vue-codemod/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + }, + "node_modules/vue-component-type-helpers": { + "version": "2.0.19", + "resolved": "https://registry.npmmirror.com/vue-component-type-helpers/-/vue-component-type-helpers-2.0.19.tgz", + "integrity": "sha512-cN3f1aTxxKo4lzNeQAkVopswuImUrb5Iurll9Gaw5cqpnbTAxtEMM1mgi6ou4X79OCyqYv1U1mzBHJkzmiK82w==", + "dev": true + }, + "node_modules/vue-eslint-parser": { + "version": "8.3.0", + "resolved": "https://registry.npmmirror.com/vue-eslint-parser/-/vue-eslint-parser-8.3.0.tgz", + "integrity": "sha512-dzHGG3+sYwSf6zFBa0Gi9ZDshD7+ad14DGOdTLjruRVgZXe2J+DcZ9iUhyR48z5g1PqRa20yt3Njna/veLJL/g==", + "dev": true, + "dependencies": { + "debug": "^4.3.2", + "eslint-scope": "^7.0.0", + "eslint-visitor-keys": "^3.1.0", + "espree": "^9.0.0", + "esquery": "^1.4.0", + "lodash": "^4.17.21", + "semver": "^7.3.5" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/mysticatea" + }, + "peerDependencies": { + "eslint": ">=6.0.0" + } + }, + "node_modules/vue-eslint-parser/node_modules/eslint-scope": { + "version": "7.2.2", + "resolved": "https://registry.npmmirror.com/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/vue-eslint-parser/node_modules/espree": { + "version": "9.6.1", + "resolved": "https://registry.npmmirror.com/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "dev": true, + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/vue-eslint-parser/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmmirror.com/estraverse/-/estraverse-5.3.0.tgz", + "integrity": 
"sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/vue-eslint-parser/node_modules/semver": { + "version": "7.6.2", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/vue-hot-reload-api": { + "version": "2.3.4", + "resolved": "https://registry.npmmirror.com/vue-hot-reload-api/-/vue-hot-reload-api-2.3.4.tgz", + "integrity": "sha512-BXq3jwIagosjgNVae6tkHzzIk6a8MHFtzAdwhnV5VlvPTFxDCvIttgSiHWjdGoTJvXtmRu5HacExfdarRcFhog==", + "dev": true + }, + "node_modules/vue-i18n": { + "version": "9.13.1", + "resolved": "https://registry.npmmirror.com/vue-i18n/-/vue-i18n-9.13.1.tgz", + "integrity": "sha512-mh0GIxx0wPtPlcB1q4k277y0iKgo25xmDPWioVVYanjPufDBpvu5ySTjP5wOrSvlYQ2m1xI+CFhGdauv/61uQg==", + "dependencies": { + "@intlify/core-base": "9.13.1", + "@intlify/shared": "9.13.1", + "@vue/devtools-api": "^6.5.0" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + }, + "peerDependencies": { + "vue": "^3.0.0" + } + }, + "node_modules/vue-loader": { + "version": "17.4.2", + "resolved": "https://registry.npmmirror.com/vue-loader/-/vue-loader-17.4.2.tgz", + "integrity": "sha512-yTKOA4R/VN4jqjw4y5HrynFL8AK0Z3/Jt7eOJXEitsm0GMRHDBjCfCiuTiLP7OESvsZYo2pATCWhDqxC5ZrM6w==", + "dev": true, + "dependencies": { + "chalk": "^4.1.0", + "hash-sum": "^2.0.0", + "watchpack": "^2.4.0" + }, + "peerDependencies": { + "webpack": "^4.1.0 || ^5.0.0-0" + }, + "peerDependenciesMeta": { + "@vue/compiler-sfc": { + "optional": true + }, + "vue": { + "optional": true + } + } + }, + "node_modules/vue-loader/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/vue-loader/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/vue-loader/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/vue-loader/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/vue-loader/node_modules/has-flag": { + 
"version": "4.0.0", + "resolved": "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/vue-loader/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/vue-pdf": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/vue-pdf/-/vue-pdf-4.3.0.tgz", + "integrity": "sha512-zd3lJj6CbtrawgaaDDciTDjkJMUKiLWtbEmBg5CvFn9Noe9oAO/GNy/fc5c59qGuFCJ14ibIV1baw4S07e5bSQ==", + "dependencies": { + "babel-plugin-syntax-dynamic-import": "^6.18.0", + "loader-utils": "^1.4.0", + "pdfjs-dist": "2.6.347", + "raw-loader": "^4.0.2", + "vue-resize-sensor": "^2.0.0", + "worker-loader": "^2.0.0" + } + }, + "node_modules/vue-pdf/node_modules/@webassemblyjs/ast": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/ast/-/ast-1.9.0.tgz", + "integrity": "sha512-C6wW5L+b7ogSDVqymbkkvuW9kruN//YisMED04xzeBBqjHa2FYnmvOlS6Xj68xWQRgWvI9cIglsjFowH/RJyEA==", + "peer": true, + "dependencies": { + "@webassemblyjs/helper-module-context": "1.9.0", + "@webassemblyjs/helper-wasm-bytecode": "1.9.0", + "@webassemblyjs/wast-parser": "1.9.0" + } + }, + "node_modules/vue-pdf/node_modules/@webassemblyjs/helper-api-error": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/helper-api-error/-/helper-api-error-1.9.0.tgz", + "integrity": "sha512-NcMLjoFMXpsASZFxJ5h2HZRcEhDkvnNFOAKneP5RbKRzaWJN36NC4jqQHKwStIhGXu5mUWlUUk7ygdtrO8lbmw==", + "peer": true + }, + "node_modules/vue-pdf/node_modules/@webassemblyjs/helper-buffer": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/helper-buffer/-/helper-buffer-1.9.0.tgz", + "integrity": "sha512-qZol43oqhq6yBPx7YM3m9Bv7WMV9Eevj6kMi6InKOuZxhw+q9hOkvq5e/PpKSiLfyetpaBnogSbNCfBwyB00CA==", + "peer": true + }, + "node_modules/vue-pdf/node_modules/@webassemblyjs/helper-wasm-bytecode": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.9.0.tgz", + "integrity": "sha512-R7FStIzyNcd7xKxCZH5lE0Bqy+hGTwS3LJjuv1ZVxd9O7eHCedSdrId/hMOd20I+v8wDXEn+bjfKDLzTepoaUw==", + "peer": true + }, + "node_modules/vue-pdf/node_modules/@webassemblyjs/helper-wasm-section": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.9.0.tgz", + "integrity": "sha512-XnMB8l3ek4tvrKUUku+IVaXNHz2YsJyOOmz+MMkZvh8h1uSJpSen6vYnw3IoQ7WwEuAhL8Efjms1ZWjqh2agvw==", + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.9.0", + "@webassemblyjs/helper-buffer": "1.9.0", + "@webassemblyjs/helper-wasm-bytecode": "1.9.0", + "@webassemblyjs/wasm-gen": "1.9.0" + } + }, + "node_modules/vue-pdf/node_modules/@webassemblyjs/ieee754": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/ieee754/-/ieee754-1.9.0.tgz", + "integrity": "sha512-dcX8JuYU/gvymzIHc9DgxTzUUTLexWwt8uCTWP3otys596io0L5aW02Gb1RjYpx2+0Jus1h4ZFqjla7umFniTg==", + "peer": true, + "dependencies": { + "@xtuc/ieee754": "^1.2.0" + } + }, + 
"node_modules/vue-pdf/node_modules/@webassemblyjs/leb128": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/leb128/-/leb128-1.9.0.tgz", + "integrity": "sha512-ENVzM5VwV1ojs9jam6vPys97B/S65YQtv/aanqnU7D8aSoHFX8GyhGg0CMfyKNIHBuAVjy3tlzd5QMMINa7wpw==", + "peer": true, + "dependencies": { + "@xtuc/long": "4.2.2" + } + }, + "node_modules/vue-pdf/node_modules/@webassemblyjs/utf8": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/utf8/-/utf8-1.9.0.tgz", + "integrity": "sha512-GZbQlWtopBTP0u7cHrEx+73yZKrQoBMpwkGEIqlacljhXCkVM1kMQge/Mf+csMJAjEdSwhOyLAS0AoR3AG5P8w==", + "peer": true + }, + "node_modules/vue-pdf/node_modules/@webassemblyjs/wasm-edit": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/wasm-edit/-/wasm-edit-1.9.0.tgz", + "integrity": "sha512-FgHzBm80uwz5M8WKnMTn6j/sVbqilPdQXTWraSjBwFXSYGirpkSWE2R9Qvz9tNiTKQvoKILpCuTjBKzOIm0nxw==", + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.9.0", + "@webassemblyjs/helper-buffer": "1.9.0", + "@webassemblyjs/helper-wasm-bytecode": "1.9.0", + "@webassemblyjs/helper-wasm-section": "1.9.0", + "@webassemblyjs/wasm-gen": "1.9.0", + "@webassemblyjs/wasm-opt": "1.9.0", + "@webassemblyjs/wasm-parser": "1.9.0", + "@webassemblyjs/wast-printer": "1.9.0" + } + }, + "node_modules/vue-pdf/node_modules/@webassemblyjs/wasm-gen": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/wasm-gen/-/wasm-gen-1.9.0.tgz", + "integrity": "sha512-cPE3o44YzOOHvlsb4+E9qSqjc9Qf9Na1OO/BHFy4OI91XDE14MjFN4lTMezzaIWdPqHnsTodGGNP+iRSYfGkjA==", + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.9.0", + "@webassemblyjs/helper-wasm-bytecode": "1.9.0", + "@webassemblyjs/ieee754": "1.9.0", + "@webassemblyjs/leb128": "1.9.0", + "@webassemblyjs/utf8": "1.9.0" + } + }, + "node_modules/vue-pdf/node_modules/@webassemblyjs/wasm-opt": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/wasm-opt/-/wasm-opt-1.9.0.tgz", + "integrity": "sha512-Qkjgm6Anhm+OMbIL0iokO7meajkzQD71ioelnfPEj6r4eOFuqm4YC3VBPqXjFyyNwowzbMD+hizmprP/Fwkl2A==", + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.9.0", + "@webassemblyjs/helper-buffer": "1.9.0", + "@webassemblyjs/wasm-gen": "1.9.0", + "@webassemblyjs/wasm-parser": "1.9.0" + } + }, + "node_modules/vue-pdf/node_modules/@webassemblyjs/wasm-parser": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/wasm-parser/-/wasm-parser-1.9.0.tgz", + "integrity": "sha512-9+wkMowR2AmdSWQzsPEjFU7njh8HTO5MqO8vjwEHuM+AMHioNqSBONRdr0NQQ3dVQrzp0s8lTcYqzUdb7YgELA==", + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.9.0", + "@webassemblyjs/helper-api-error": "1.9.0", + "@webassemblyjs/helper-wasm-bytecode": "1.9.0", + "@webassemblyjs/ieee754": "1.9.0", + "@webassemblyjs/leb128": "1.9.0", + "@webassemblyjs/utf8": "1.9.0" + } + }, + "node_modules/vue-pdf/node_modules/@webassemblyjs/wast-printer": { + "version": "1.9.0", + "resolved": "https://registry.npmmirror.com/@webassemblyjs/wast-printer/-/wast-printer-1.9.0.tgz", + "integrity": "sha512-2J0nE95rHXHyQ24cWjMKJ1tqB/ds8z/cyeOZxJhcb+rW+SQASVjuznUSmdz5GpVJTzU8JkhYut0D3siFDD6wsA==", + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.9.0", + "@webassemblyjs/wast-parser": "1.9.0", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/vue-pdf/node_modules/acorn": { + "version": "6.4.2", + "resolved": "https://registry.npmmirror.com/acorn/-/acorn-6.4.2.tgz", + "integrity": 
"sha512-XtGIhXwF8YM8bJhGxG5kXgjkEuNGLTkoYqVE+KMR+aspr4KGYmKYg7yUe3KghyQ9yheNwLnjmzh/7+gfDBmHCQ==", + "peer": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/vue-pdf/node_modules/braces": { + "version": "2.3.2", + "resolved": "https://registry.npmmirror.com/braces/-/braces-2.3.2.tgz", + "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", + "peer": true, + "dependencies": { + "arr-flatten": "^1.1.0", + "array-unique": "^0.3.2", + "extend-shallow": "^2.0.1", + "fill-range": "^4.0.0", + "isobject": "^3.0.1", + "repeat-element": "^1.1.2", + "snapdragon": "^0.8.1", + "snapdragon-node": "^2.0.1", + "split-string": "^3.0.2", + "to-regex": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/vue-pdf/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmmirror.com/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", + "peer": true + }, + "node_modules/vue-pdf/node_modules/enhanced-resolve": { + "version": "4.5.0", + "resolved": "https://registry.npmmirror.com/enhanced-resolve/-/enhanced-resolve-4.5.0.tgz", + "integrity": "sha512-Nv9m36S/vxpsI+Hc4/ZGRs0n9mXqSWGGq49zxb/cJfPAQMbUtttJAlNPS4AQzaBdw/pKskw5bMbekT/Y7W/Wlg==", + "peer": true, + "dependencies": { + "graceful-fs": "^4.1.2", + "memory-fs": "^0.5.0", + "tapable": "^1.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/vue-pdf/node_modules/enhanced-resolve/node_modules/memory-fs": { + "version": "0.5.0", + "resolved": "https://registry.npmmirror.com/memory-fs/-/memory-fs-0.5.0.tgz", + "integrity": "sha512-jA0rdU5KoQMC0e6ppoNRtpp6vjFq6+NY7r8hywnC7V+1Xj/MtHwGIbB1QaK/dunyjWteJzmkpd7ooeWg10T7GA==", + "peer": true, + "dependencies": { + "errno": "^0.1.3", + "readable-stream": "^2.0.1" + }, + "engines": { + "node": ">=4.3.0 <5.0.0 || >=5.10" + } + }, + "node_modules/vue-pdf/node_modules/eslint-scope": { + "version": "4.0.3", + "resolved": "https://registry.npmmirror.com/eslint-scope/-/eslint-scope-4.0.3.tgz", + "integrity": "sha512-p7VutNr1O/QrxysMo3E45FjYDTeXBy0iTltPFNSqKAIfjDSXC+4dj+qfyuD8bfAXrW/y6lW3O76VaYNPKfpKrg==", + "peer": true, + "dependencies": { + "esrecurse": "^4.1.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/vue-pdf/node_modules/fill-range": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/fill-range/-/fill-range-4.0.0.tgz", + "integrity": "sha512-VcpLTWqWDiTerugjj8e3+esbg+skS3M9e54UuR3iCeIDMXCLTsAH8hTSzDQU/X6/6t3eYkOKoZSef2PlU6U1XQ==", + "peer": true, + "dependencies": { + "extend-shallow": "^2.0.1", + "is-number": "^3.0.0", + "repeat-string": "^1.6.1", + "to-regex-range": "^2.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/vue-pdf/node_modules/find-cache-dir": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/find-cache-dir/-/find-cache-dir-2.1.0.tgz", + "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", + "peer": true, + "dependencies": { + "commondir": "^1.0.1", + "make-dir": "^2.0.0", + "pkg-dir": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/vue-pdf/node_modules/find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/find-up/-/find-up-3.0.0.tgz", + "integrity": 
"sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "peer": true, + "dependencies": { + "locate-path": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/vue-pdf/node_modules/is-extendable": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/is-extendable/-/is-extendable-1.0.1.tgz", + "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "peer": true, + "dependencies": { + "is-plain-object": "^2.0.4" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/vue-pdf/node_modules/is-number": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/is-number/-/is-number-3.0.0.tgz", + "integrity": "sha512-4cboCqIpliH+mAvFNegjZQ4kgKc3ZUhQVr3HvWbSh5q3WH2v82ct+T2Y1hdU5Gdtorx/cLifQjqCbL7bpznLTg==", + "peer": true, + "dependencies": { + "kind-of": "^3.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/vue-pdf/node_modules/is-number/node_modules/kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmmirror.com/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "peer": true, + "dependencies": { + "is-buffer": "^1.1.5" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/vue-pdf/node_modules/is-wsl": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/is-wsl/-/is-wsl-1.1.0.tgz", + "integrity": "sha512-gfygJYZ2gLTDlmbWMI0CE2MwnFzSN/2SZfkMlItC4K/JBlsWVDB0bO6XhqcY13YXE7iMcAJnzTCJjPiTeJJ0Mw==", + "peer": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/vue-pdf/node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmmirror.com/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "peer": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/vue-pdf/node_modules/loader-runner": { + "version": "2.4.0", + "resolved": "https://registry.npmmirror.com/loader-runner/-/loader-runner-2.4.0.tgz", + "integrity": "sha512-Jsmr89RcXGIwivFY21FcRrisYZfvLMTWx5kOLc+JTxtpBOG6xML0vzbc6SEQG2FO9/4Fc3wW4LVcB5DmGflaRw==", + "peer": true, + "engines": { + "node": ">=4.3.0 <5.0.0 || >=5.10" + } + }, + "node_modules/vue-pdf/node_modules/locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "peer": true, + "dependencies": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/vue-pdf/node_modules/make-dir": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/make-dir/-/make-dir-2.1.0.tgz", + "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", + "peer": true, + "dependencies": { + "pify": "^4.0.1", + "semver": "^5.6.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/vue-pdf/node_modules/micromatch": { + "version": "3.1.10", + "resolved": "https://registry.npmmirror.com/micromatch/-/micromatch-3.1.10.tgz", + "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", + "peer": true, + "dependencies": { + "arr-diff": "^4.0.0", + "array-unique": "^0.3.2", + "braces": "^2.3.1", + "define-property": 
"^2.0.2", + "extend-shallow": "^3.0.2", + "extglob": "^2.0.4", + "fragment-cache": "^0.2.1", + "kind-of": "^6.0.2", + "nanomatch": "^1.2.9", + "object.pick": "^1.3.0", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/vue-pdf/node_modules/micromatch/node_modules/extend-shallow": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/extend-shallow/-/extend-shallow-3.0.2.tgz", + "integrity": "sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==", + "peer": true, + "dependencies": { + "assign-symbols": "^1.0.0", + "is-extendable": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/vue-pdf/node_modules/p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "peer": true, + "dependencies": { + "p-limit": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/vue-pdf/node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", + "peer": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/vue-pdf/node_modules/pdfjs-dist": { + "version": "2.6.347", + "resolved": "https://registry.npmmirror.com/pdfjs-dist/-/pdfjs-dist-2.6.347.tgz", + "integrity": "sha512-QC+h7hG2su9v/nU1wEI3SnpPIrqJODL7GTDFvR74ANKGq1AFJW16PH8VWnhpiTi9YcLSFV9xLeWSgq+ckHLdVQ==" + }, + "node_modules/vue-pdf/node_modules/pify": { + "version": "4.0.1", + "resolved": "https://registry.npmmirror.com/pify/-/pify-4.0.1.tgz", + "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", + "peer": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/vue-pdf/node_modules/pkg-dir": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/pkg-dir/-/pkg-dir-3.0.0.tgz", + "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", + "peer": true, + "dependencies": { + "find-up": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/vue-pdf/node_modules/schema-utils": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/schema-utils/-/schema-utils-1.0.0.tgz", + "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", + "peer": true, + "dependencies": { + "ajv": "^6.1.0", + "ajv-errors": "^1.0.0", + "ajv-keywords": "^3.1.0" + }, + "engines": { + "node": ">= 4" + } + }, + "node_modules/vue-pdf/node_modules/semver": { + "version": "5.7.2", + "resolved": "https://registry.npmmirror.com/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", + "peer": true, + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/vue-pdf/node_modules/serialize-javascript": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/serialize-javascript/-/serialize-javascript-4.0.0.tgz", + "integrity": "sha512-GaNA54380uFefWghODBWEGisLZFj00nS5ACs6yHa9nLqlLpVLO8ChDGeKRjZnV4Nh4n0Qi7nhYZD/9fCPzEqkw==", + "peer": true, + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/vue-pdf/node_modules/terser": 
{ + "version": "4.8.1", + "resolved": "https://registry.npmmirror.com/terser/-/terser-4.8.1.tgz", + "integrity": "sha512-4GnLC0x667eJG0ewJTa6z/yXrbLGv80D9Ru6HIpCQmO+Q4PfEtBFi0ObSckqwL6VyQv/7ENJieXHo2ANmdQwgw==", + "peer": true, + "dependencies": { + "commander": "^2.20.0", + "source-map": "~0.6.1", + "source-map-support": "~0.5.12" + }, + "bin": { + "terser": "bin/terser" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/vue-pdf/node_modules/terser-webpack-plugin": { + "version": "1.4.5", + "resolved": "https://registry.npmmirror.com/terser-webpack-plugin/-/terser-webpack-plugin-1.4.5.tgz", + "integrity": "sha512-04Rfe496lN8EYruwi6oPQkG0vo8C+HT49X687FZnpPF0qMAIHONI6HEXYPKDOE8e5HjXTyKfqRd/agHtH0kOtw==", + "peer": true, + "dependencies": { + "cacache": "^12.0.2", + "find-cache-dir": "^2.1.0", + "is-wsl": "^1.1.0", + "schema-utils": "^1.0.0", + "serialize-javascript": "^4.0.0", + "source-map": "^0.6.1", + "terser": "^4.1.2", + "webpack-sources": "^1.4.0", + "worker-farm": "^1.7.0" + }, + "engines": { + "node": ">= 6.9.0" + }, + "peerDependencies": { + "webpack": "^4.0.0" + } + }, + "node_modules/vue-pdf/node_modules/to-regex-range": { + "version": "2.1.1", + "resolved": "https://registry.npmmirror.com/to-regex-range/-/to-regex-range-2.1.1.tgz", + "integrity": "sha512-ZZWNfCjUokXXDGXFpZehJIkZqq91BcULFq/Pi7M5i4JnxXdhMKAK682z8bCW3o8Hj1wuuzoKcW3DfVzaP6VuNg==", + "peer": true, + "dependencies": { + "is-number": "^3.0.0", + "repeat-string": "^1.6.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/vue-pdf/node_modules/watchpack": { + "version": "1.7.5", + "resolved": "https://registry.npmmirror.com/watchpack/-/watchpack-1.7.5.tgz", + "integrity": "sha512-9P3MWk6SrKjHsGkLT2KHXdQ/9SNkyoJbabxnKOoJepsvJjJG8uYTR3yTPxPQvNDI3w4Nz1xnE0TLHK4RIVe/MQ==", + "peer": true, + "dependencies": { + "graceful-fs": "^4.1.2", + "neo-async": "^2.5.0" + }, + "optionalDependencies": { + "chokidar": "^3.4.1", + "watchpack-chokidar2": "^2.0.1" + } + }, + "node_modules/vue-pdf/node_modules/webpack": { + "version": "4.47.0", + "resolved": "https://registry.npmmirror.com/webpack/-/webpack-4.47.0.tgz", + "integrity": "sha512-td7fYwgLSrky3fI1EuU5cneU4+pbH6GgOfuKNS1tNPcfdGinGELAqsb/BP4nnvZyKSG2i/xFGU7+n2PvZA8HJQ==", + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.9.0", + "@webassemblyjs/helper-module-context": "1.9.0", + "@webassemblyjs/wasm-edit": "1.9.0", + "@webassemblyjs/wasm-parser": "1.9.0", + "acorn": "^6.4.1", + "ajv": "^6.10.2", + "ajv-keywords": "^3.4.1", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^4.5.0", + "eslint-scope": "^4.0.3", + "json-parse-better-errors": "^1.0.2", + "loader-runner": "^2.4.0", + "loader-utils": "^1.2.3", + "memory-fs": "^0.4.1", + "micromatch": "^3.1.10", + "mkdirp": "^0.5.3", + "neo-async": "^2.6.1", + "node-libs-browser": "^2.2.1", + "schema-utils": "^1.0.0", + "tapable": "^1.1.3", + "terser-webpack-plugin": "^1.4.3", + "watchpack": "^1.7.4", + "webpack-sources": "^1.4.1" + }, + "bin": { + "webpack": "bin/webpack.js" + }, + "engines": { + "node": ">=6.11.5" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependenciesMeta": { + "webpack-cli": { + "optional": true + }, + "webpack-command": { + "optional": true + } + } + }, + "node_modules/vue-pdf/node_modules/webpack-sources": { + "version": "1.4.3", + "resolved": "https://registry.npmmirror.com/webpack-sources/-/webpack-sources-1.4.3.tgz", + "integrity": 
"sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ==", + "peer": true, + "dependencies": { + "source-list-map": "^2.0.0", + "source-map": "~0.6.1" + } + }, + "node_modules/vue-pdf/node_modules/worker-loader": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/worker-loader/-/worker-loader-2.0.0.tgz", + "integrity": "sha512-tnvNp4K3KQOpfRnD20m8xltE3eWh89Ye+5oj7wXEEHKac1P4oZ6p9oTj8/8ExqoSBnk9nu5Pr4nKfQ1hn2APJw==", + "dependencies": { + "loader-utils": "^1.0.0", + "schema-utils": "^0.4.0" + }, + "engines": { + "node": ">= 6.9.0 || >= 8.9.0" + }, + "peerDependencies": { + "webpack": "^3.0.0 || ^4.0.0-alpha.0 || ^4.0.0" + } + }, + "node_modules/vue-pdf/node_modules/worker-loader/node_modules/schema-utils": { + "version": "0.4.7", + "resolved": "https://registry.npmmirror.com/schema-utils/-/schema-utils-0.4.7.tgz", + "integrity": "sha512-v/iwU6wvwGK8HbU9yi3/nhGzP0yGSuhQMzL6ySiec1FSrZZDkhm4noOSWzrNFo/jEc+SJY6jRTwuwbSXJPDUnQ==", + "dependencies": { + "ajv": "^6.1.0", + "ajv-keywords": "^3.1.0" + }, + "engines": { + "node": ">= 4" + } + }, + "node_modules/vue-resize-sensor": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/vue-resize-sensor/-/vue-resize-sensor-2.0.0.tgz", + "integrity": "sha512-W+y2EAI/BxS4Vlcca9scQv8ifeBFck56DRtSwWJ2H4Cw1GLNUYxiZxUHHkuzuI5JPW/cYtL1bPO5xPyEXx4LmQ==" + }, + "node_modules/vue-router": { + "version": "4.3.2", + "resolved": "https://registry.npmmirror.com/vue-router/-/vue-router-4.3.2.tgz", + "integrity": "sha512-hKQJ1vDAZ5LVkKEnHhmm1f9pMiWIBNGF5AwU67PdH7TyXCj/a4hTccuUuYCAMgJK6rO/NVYtQIEN3yL8CECa7Q==", + "dependencies": { + "@vue/devtools-api": "^6.5.1" + }, + "funding": { + "url": "https://github.com/sponsors/posva" + }, + "peerDependencies": { + "vue": "^3.2.0" + } + }, + "node_modules/vue-style-loader": { + "version": "4.1.3", + "resolved": "https://registry.npmmirror.com/vue-style-loader/-/vue-style-loader-4.1.3.tgz", + "integrity": "sha512-sFuh0xfbtpRlKfm39ss/ikqs9AbKCoXZBpHeVZ8Tx650o0k0q/YCM7FRvigtxpACezfq6af+a7JeqVTWvncqDg==", + "dev": true, + "dependencies": { + "hash-sum": "^1.0.2", + "loader-utils": "^1.0.2" + } + }, + "node_modules/vue-style-loader/node_modules/hash-sum": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/hash-sum/-/hash-sum-1.0.2.tgz", + "integrity": "sha512-fUs4B4L+mlt8/XAtSOGMUO1TXmAelItBPtJG7CyHJfYTdDjwisntGO2JQz7oUsatOY9o68+57eziUVNw/mRHmA==", + "dev": true + }, + "node_modules/vue-template-es2015-compiler": { + "version": "1.9.1", + "resolved": "https://registry.npmmirror.com/vue-template-es2015-compiler/-/vue-template-es2015-compiler-1.9.1.tgz", + "integrity": "sha512-4gDntzrifFnCEvyoO8PqyJDmguXgVPxKiIxrBKjIowvL9l+N66196+72XVYR8BBf1Uv1Fgt3bGevJ+sEmxfZzw==", + "dev": true + }, + "node_modules/vue-types": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/vue-types/-/vue-types-3.0.2.tgz", + "integrity": "sha512-IwUC0Aq2zwaXqy74h4WCvFCUtoV0iSWr0snWnE9TnU18S66GAQyqQbRf2qfJtUuiFsBf6qp0MEwdonlwznlcrw==", + "dependencies": { + "is-plain-object": "3.0.1" + }, + "engines": { + "node": ">=10.15.0" + }, + "peerDependencies": { + "vue": "^3.0.0" + } + }, + "node_modules/vue-types/node_modules/is-plain-object": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/is-plain-object/-/is-plain-object-3.0.1.tgz", + "integrity": "sha512-Xnpx182SBMrr/aBik8y+GuR4U1L9FqMSojwDQwPMmxyC6bvEqly9UBCxhauBF5vNh2gwWJNX6oDV7O+OM4z34g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/vue3-apexcharts": { + 
"version": "1.5.3", + "resolved": "https://registry.npmmirror.com/vue3-apexcharts/-/vue3-apexcharts-1.5.3.tgz", + "integrity": "sha512-yaHTPoj0iVKAtEVg8wEwIwwvf0VG+lPYNufCf3txRzYQOqdKPoZaZ9P3Dj3X+2A1XY9O1kcTk9HVqvLo+rppvQ==", + "peerDependencies": { + "apexcharts": "> 3.0.0", + "vue": "> 3.0.0" + } + }, + "node_modules/vuex": { + "version": "4.1.0", + "resolved": "https://registry.npmmirror.com/vuex/-/vuex-4.1.0.tgz", + "integrity": "sha512-hmV6UerDrPcgbSy9ORAtNXDr9M4wlNP4pEFKye4ujJF8oqgFFuxDCdOLS3eNoRTtq5O3hoBDh9Doj1bQMYHRbQ==", + "dependencies": { + "@vue/devtools-api": "^6.0.0-beta.11" + }, + "peerDependencies": { + "vue": "^3.2.0" + } + }, + "node_modules/w3c-hr-time": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/w3c-hr-time/-/w3c-hr-time-1.0.2.tgz", + "integrity": "sha512-z8P5DvDNjKDoFIHK7q8r8lackT6l+jo/Ye3HOle7l9nICP9lf1Ci25fy9vHd0JOWewkIFzXIEig3TdKT7JQ5fQ==", + "deprecated": "Use your platform's native performance.now() and performance.timeOrigin.", + "dev": true, + "dependencies": { + "browser-process-hrtime": "^1.0.0" + } + }, + "node_modules/w3c-xmlserializer": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/w3c-xmlserializer/-/w3c-xmlserializer-2.0.0.tgz", + "integrity": "sha512-4tzD0mF8iSiMiNs30BiLO3EpfGLZUT2MSX/G+o7ZywDzliWQ3OPtTZ0PTC3B3ca1UAf4cJMHB+2Bf56EriJuRA==", + "dev": true, + "dependencies": { + "xml-name-validator": "^3.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/walker": { + "version": "1.0.8", + "resolved": "https://registry.npmmirror.com/walker/-/walker-1.0.8.tgz", + "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==", + "dev": true, + "dependencies": { + "makeerror": "1.0.12" + } + }, + "node_modules/warning": { + "version": "4.0.3", + "resolved": "https://registry.npmmirror.com/warning/-/warning-4.0.3.tgz", + "integrity": "sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==", + "dependencies": { + "loose-envify": "^1.0.0" + } + }, + "node_modules/watchpack": { + "version": "2.4.1", + "resolved": "https://registry.npmmirror.com/watchpack/-/watchpack-2.4.1.tgz", + "integrity": "sha512-8wrBCMtVhqcXP2Sup1ctSkga6uc2Bx0IIvKyT7yTFier5AXHooSI+QyQQAtTb7+E0IUCCKyTFmXqdqgum2XWGg==", + "dependencies": { + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.1.2" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/watchpack-chokidar2": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/watchpack-chokidar2/-/watchpack-chokidar2-2.0.1.tgz", + "integrity": "sha512-nCFfBIPKr5Sh61s4LPpy1Wtfi0HE8isJ3d2Yb5/Ppw2P2B/3eVSEBjKfN0fmHJSK14+31KwMKmcrzs2GM4P0Ww==", + "optional": true, + "peer": true, + "dependencies": { + "chokidar": "^2.1.8" + } + }, + "node_modules/watchpack-chokidar2/node_modules/anymatch": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/anymatch/-/anymatch-2.0.0.tgz", + "integrity": "sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw==", + "optional": true, + "peer": true, + "dependencies": { + "micromatch": "^3.1.4", + "normalize-path": "^2.1.1" + } + }, + "node_modules/watchpack-chokidar2/node_modules/anymatch/node_modules/normalize-path": { + "version": "2.1.1", + "resolved": "https://registry.npmmirror.com/normalize-path/-/normalize-path-2.1.1.tgz", + "integrity": "sha512-3pKJwH184Xo/lnH6oyP1q2pMd7HcypqqmRs91/6/i2CGtWwIKGCkOOMTm/zXbgTEWHw1uNpNi/igc3ePOYHb6w==", + "optional": true, + "peer": true, + 
"dependencies": { + "remove-trailing-separator": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/watchpack-chokidar2/node_modules/binary-extensions": { + "version": "1.13.1", + "resolved": "https://registry.npmmirror.com/binary-extensions/-/binary-extensions-1.13.1.tgz", + "integrity": "sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw==", + "optional": true, + "peer": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/watchpack-chokidar2/node_modules/braces": { + "version": "2.3.2", + "resolved": "https://registry.npmmirror.com/braces/-/braces-2.3.2.tgz", + "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", + "optional": true, + "peer": true, + "dependencies": { + "arr-flatten": "^1.1.0", + "array-unique": "^0.3.2", + "extend-shallow": "^2.0.1", + "fill-range": "^4.0.0", + "isobject": "^3.0.1", + "repeat-element": "^1.1.2", + "snapdragon": "^0.8.1", + "snapdragon-node": "^2.0.1", + "split-string": "^3.0.2", + "to-regex": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/watchpack-chokidar2/node_modules/chokidar": { + "version": "2.1.8", + "resolved": "https://registry.npmmirror.com/chokidar/-/chokidar-2.1.8.tgz", + "integrity": "sha512-ZmZUazfOzf0Nve7duiCKD23PFSCs4JPoYyccjUFF3aQkQadqBhfzhjkwBH2mNOG9cTBwhamM37EIsIkZw3nRgg==", + "deprecated": "Chokidar 2 does not receive security updates since 2019. Upgrade to chokidar 3 with 15x fewer dependencies", + "optional": true, + "peer": true, + "dependencies": { + "anymatch": "^2.0.0", + "async-each": "^1.0.1", + "braces": "^2.3.2", + "glob-parent": "^3.1.0", + "inherits": "^2.0.3", + "is-binary-path": "^1.0.0", + "is-glob": "^4.0.0", + "normalize-path": "^3.0.0", + "path-is-absolute": "^1.0.0", + "readdirp": "^2.2.1", + "upath": "^1.1.1" + }, + "optionalDependencies": { + "fsevents": "^1.2.7" + } + }, + "node_modules/watchpack-chokidar2/node_modules/fill-range": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/fill-range/-/fill-range-4.0.0.tgz", + "integrity": "sha512-VcpLTWqWDiTerugjj8e3+esbg+skS3M9e54UuR3iCeIDMXCLTsAH8hTSzDQU/X6/6t3eYkOKoZSef2PlU6U1XQ==", + "optional": true, + "peer": true, + "dependencies": { + "extend-shallow": "^2.0.1", + "is-number": "^3.0.0", + "repeat-string": "^1.6.1", + "to-regex-range": "^2.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/watchpack-chokidar2/node_modules/fsevents": { + "version": "1.2.13", + "resolved": "https://registry.npmmirror.com/fsevents/-/fsevents-1.2.13.tgz", + "integrity": "sha512-oWb1Z6mkHIskLzEJ/XWX0srkpkTQ7vaopMQkyaEIoq0fmtFVxOthb8cCxeT+p3ynTdkk/RZwbgG4brR5BeWECw==", + "deprecated": "The v1 package contains DANGEROUS / INSECURE binaries. 
Upgrade to safe fsevents v2", + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "peer": true, + "dependencies": { + "bindings": "^1.5.0", + "nan": "^2.12.1" + }, + "engines": { + "node": ">= 4.0" + } + }, + "node_modules/watchpack-chokidar2/node_modules/glob-parent": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/glob-parent/-/glob-parent-3.1.0.tgz", + "integrity": "sha512-E8Ak/2+dZY6fnzlR7+ueWvhsH1SjHr4jjss4YS/h4py44jY9MhK/VFdaZJAWDz6BbL21KeteKxFSFpq8OS5gVA==", + "optional": true, + "peer": true, + "dependencies": { + "is-glob": "^3.1.0", + "path-dirname": "^1.0.0" + } + }, + "node_modules/watchpack-chokidar2/node_modules/glob-parent/node_modules/is-glob": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/is-glob/-/is-glob-3.1.0.tgz", + "integrity": "sha512-UFpDDrPgM6qpnFNI+rh/p3bUaq9hKLZN8bMUWzxmcnZVS3omf4IPK+BrewlnWjO1WmUsMYuSjKh4UJuV4+Lqmw==", + "optional": true, + "peer": true, + "dependencies": { + "is-extglob": "^2.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/watchpack-chokidar2/node_modules/is-binary-path": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/is-binary-path/-/is-binary-path-1.0.1.tgz", + "integrity": "sha512-9fRVlXc0uCxEDj1nQzaWONSpbTfx0FmJfzHF7pwlI8DkWGoHBBea4Pg5Ky0ojwwxQmnSifgbKkI06Qv0Ljgj+Q==", + "optional": true, + "peer": true, + "dependencies": { + "binary-extensions": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/watchpack-chokidar2/node_modules/is-extendable": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/is-extendable/-/is-extendable-1.0.1.tgz", + "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "optional": true, + "peer": true, + "dependencies": { + "is-plain-object": "^2.0.4" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/watchpack-chokidar2/node_modules/is-number": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/is-number/-/is-number-3.0.0.tgz", + "integrity": "sha512-4cboCqIpliH+mAvFNegjZQ4kgKc3ZUhQVr3HvWbSh5q3WH2v82ct+T2Y1hdU5Gdtorx/cLifQjqCbL7bpznLTg==", + "optional": true, + "peer": true, + "dependencies": { + "kind-of": "^3.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/watchpack-chokidar2/node_modules/micromatch": { + "version": "3.1.10", + "resolved": "https://registry.npmmirror.com/micromatch/-/micromatch-3.1.10.tgz", + "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", + "optional": true, + "peer": true, + "dependencies": { + "arr-diff": "^4.0.0", + "array-unique": "^0.3.2", + "braces": "^2.3.1", + "define-property": "^2.0.2", + "extend-shallow": "^3.0.2", + "extglob": "^2.0.4", + "fragment-cache": "^0.2.1", + "kind-of": "^6.0.2", + "nanomatch": "^1.2.9", + "object.pick": "^1.3.0", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/watchpack-chokidar2/node_modules/micromatch/node_modules/extend-shallow": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/extend-shallow/-/extend-shallow-3.0.2.tgz", + "integrity": "sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==", + "optional": true, + "peer": true, + "dependencies": { + "assign-symbols": "^1.0.0", + "is-extendable": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + 
"node_modules/watchpack-chokidar2/node_modules/micromatch/node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmmirror.com/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "optional": true, + "peer": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/watchpack-chokidar2/node_modules/readdirp": { + "version": "2.2.1", + "resolved": "https://registry.npmmirror.com/readdirp/-/readdirp-2.2.1.tgz", + "integrity": "sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ==", + "optional": true, + "peer": true, + "dependencies": { + "graceful-fs": "^4.1.11", + "micromatch": "^3.1.10", + "readable-stream": "^2.0.2" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/watchpack-chokidar2/node_modules/to-regex-range": { + "version": "2.1.1", + "resolved": "https://registry.npmmirror.com/to-regex-range/-/to-regex-range-2.1.1.tgz", + "integrity": "sha512-ZZWNfCjUokXXDGXFpZehJIkZqq91BcULFq/Pi7M5i4JnxXdhMKAK682z8bCW3o8Hj1wuuzoKcW3DfVzaP6VuNg==", + "optional": true, + "peer": true, + "dependencies": { + "is-number": "^3.0.0", + "repeat-string": "^1.6.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wbuf": { + "version": "1.7.3", + "resolved": "https://registry.npmmirror.com/wbuf/-/wbuf-1.7.3.tgz", + "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==", + "dev": true, + "dependencies": { + "minimalistic-assert": "^1.0.0" + } + }, + "node_modules/wcwidth": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/wcwidth/-/wcwidth-1.0.1.tgz", + "integrity": "sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg==", + "dependencies": { + "defaults": "^1.0.3" + } + }, + "node_modules/web-streams-polyfill": { + "version": "3.3.3", + "resolved": "https://registry.npmmirror.com/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", + "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/webidl-conversions": { + "version": "6.1.0", + "resolved": "https://registry.npmmirror.com/webidl-conversions/-/webidl-conversions-6.1.0.tgz", + "integrity": "sha512-qBIvFLGiBpLjfwmYAaHPXsn+ho5xZnGvyGvsarywGNc8VyQJUMHJ8OBKGGrPER0okBeMDaan4mNBlgBROxuI8w==", + "dev": true, + "engines": { + "node": ">=10.4" + } + }, + "node_modules/webpack": { + "version": "5.91.0", + "resolved": "https://registry.npmmirror.com/webpack/-/webpack-5.91.0.tgz", + "integrity": "sha512-rzVwlLeBWHJbmgTC/8TvAcu5vpJNII+MelQpylD4jNERPwpBJOE2lEcko1zJX3QJeLjTTAnQxn/OJ8bjDzVQaw==", + "dependencies": { + "@types/eslint-scope": "^3.7.3", + "@types/estree": "^1.0.5", + "@webassemblyjs/ast": "^1.12.1", + "@webassemblyjs/wasm-edit": "^1.12.1", + "@webassemblyjs/wasm-parser": "^1.12.1", + "acorn": "^8.7.1", + "acorn-import-assertions": "^1.9.0", + "browserslist": "^4.21.10", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^5.16.0", + "es-module-lexer": "^1.2.1", + "eslint-scope": "5.1.1", + "events": "^3.2.0", + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.2.11", + "json-parse-even-better-errors": "^2.3.1", + "loader-runner": "^4.2.0", + "mime-types": "^2.1.27", + "neo-async": "^2.6.2", + "schema-utils": "^3.2.0", + "tapable": "^2.1.1", + "terser-webpack-plugin": "^5.3.10", + "watchpack": "^2.4.1", + "webpack-sources": 
"^3.2.3" + }, + "bin": { + "webpack": "bin/webpack.js" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependenciesMeta": { + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-bundle-analyzer": { + "version": "4.10.2", + "resolved": "https://registry.npmmirror.com/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.10.2.tgz", + "integrity": "sha512-vJptkMm9pk5si4Bv922ZbKLV8UTT4zib4FPgXMhgzUny0bfDDkLXAVQs3ly3fS4/TN9ROFtb0NFrm04UXFE/Vw==", + "dev": true, + "dependencies": { + "@discoveryjs/json-ext": "0.5.7", + "acorn": "^8.0.4", + "acorn-walk": "^8.0.0", + "commander": "^7.2.0", + "debounce": "^1.2.1", + "escape-string-regexp": "^4.0.0", + "gzip-size": "^6.0.0", + "html-escaper": "^2.0.2", + "opener": "^1.5.2", + "picocolors": "^1.0.0", + "sirv": "^2.0.3", + "ws": "^7.3.1" + }, + "bin": { + "webpack-bundle-analyzer": "lib/bin/analyzer.js" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/webpack-bundle-analyzer/node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/webpack-chain": { + "version": "6.5.1", + "resolved": "https://registry.npmmirror.com/webpack-chain/-/webpack-chain-6.5.1.tgz", + "integrity": "sha512-7doO/SRtLu8q5WM0s7vPKPWX580qhi0/yBHkOxNkv50f6qB76Zy9o2wRTrrPULqYTvQlVHuvbA8v+G5ayuUDsA==", + "deprecated": "Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.", + "dev": true, + "dependencies": { + "deepmerge": "^1.5.2", + "javascript-stringify": "^2.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/webpack-chain/node_modules/deepmerge": { + "version": "1.5.2", + "resolved": "https://registry.npmmirror.com/deepmerge/-/deepmerge-1.5.2.tgz", + "integrity": "sha512-95k0GDqvBjZavkuvzx/YqVLv/6YYa17fz6ILMSf7neqQITCPbnfEnQvEgMPNjH4kgobe7+WIL0yJEHku+H3qtQ==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/webpack-cli": { + "version": "5.1.4", + "resolved": "https://registry.npmmirror.com/webpack-cli/-/webpack-cli-5.1.4.tgz", + "integrity": "sha512-pIDJHIEI9LR0yxHXQ+Qh95k2EvXpWzZ5l+d+jIo+RdSm9MiHfzazIxwwni/p7+x4eJZuvG1AJwgC4TNQ7NRgsg==", + "dependencies": { + "@discoveryjs/json-ext": "^0.5.0", + "@webpack-cli/configtest": "^2.1.1", + "@webpack-cli/info": "^2.0.2", + "@webpack-cli/serve": "^2.0.5", + "colorette": "^2.0.14", + "commander": "^10.0.1", + "cross-spawn": "^7.0.3", + "envinfo": "^7.7.3", + "fastest-levenshtein": "^1.0.12", + "import-local": "^3.0.2", + "interpret": "^3.1.1", + "rechoir": "^0.8.0", + "webpack-merge": "^5.7.3" + }, + "bin": { + "webpack-cli": "bin/cli.js" + }, + "engines": { + "node": ">=14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "5.x.x" + }, + "peerDependenciesMeta": { + "@webpack-cli/generators": { + "optional": true + }, + "webpack-bundle-analyzer": { + "optional": true + }, + "webpack-dev-server": { + "optional": true + } + } + }, + "node_modules/webpack-cli/node_modules/commander": { + "version": "10.0.1", + "resolved": 
"https://registry.npmmirror.com/commander/-/commander-10.0.1.tgz", + "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", + "engines": { + "node": ">=14" + } + }, + "node_modules/webpack-dev-middleware": { + "version": "5.3.4", + "resolved": "https://registry.npmmirror.com/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz", + "integrity": "sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==", + "dev": true, + "dependencies": { + "colorette": "^2.0.10", + "memfs": "^3.4.3", + "mime-types": "^2.1.31", + "range-parser": "^1.2.1", + "schema-utils": "^4.0.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/webpack-dev-middleware/node_modules/ajv": { + "version": "8.13.0", + "resolved": "https://registry.npmmirror.com/ajv/-/ajv-8.13.0.tgz", + "integrity": "sha512-PRA911Blj99jR5RMeTunVbNXMF6Lp4vZXnk5GQjcnUWUTsrXtekg/pnmFFI2u/I36Y/2bITGS30GZCXei6uNkA==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.3", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.4.1" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/webpack-dev-middleware/node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/webpack-dev-middleware/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "dev": true + }, + "node_modules/webpack-dev-middleware/node_modules/schema-utils": { + "version": "4.2.0", + "resolved": "https://registry.npmmirror.com/schema-utils/-/schema-utils-4.2.0.tgz", + "integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/webpack-dev-server": { + "version": "4.15.2", + "resolved": "https://registry.npmmirror.com/webpack-dev-server/-/webpack-dev-server-4.15.2.tgz", + "integrity": "sha512-0XavAZbNJ5sDrCbkpWL8mia0o5WPOd2YGtxrEiZkBK9FjLppIUK2TgxK6qGD2P3hUXTJNNPVibrerKcx5WkR1g==", + "dev": true, + "dependencies": { + "@types/bonjour": "^3.5.9", + "@types/connect-history-api-fallback": "^1.3.5", + "@types/express": "^4.17.13", + "@types/serve-index": "^1.9.1", + "@types/serve-static": "^1.13.10", + "@types/sockjs": "^0.3.33", + "@types/ws": "^8.5.5", + "ansi-html-community": "^0.0.8", + "bonjour-service": "^1.0.11", + "chokidar": "^3.5.3", + "colorette": "^2.0.10", + "compression": "^1.7.4", + "connect-history-api-fallback": "^2.0.0", + "default-gateway": "^6.0.3", + "express": "^4.17.3", + "graceful-fs": "^4.2.6", + 
"html-entities": "^2.3.2", + "http-proxy-middleware": "^2.0.3", + "ipaddr.js": "^2.0.1", + "launch-editor": "^2.6.0", + "open": "^8.0.9", + "p-retry": "^4.5.0", + "rimraf": "^3.0.2", + "schema-utils": "^4.0.0", + "selfsigned": "^2.1.1", + "serve-index": "^1.9.1", + "sockjs": "^0.3.24", + "spdy": "^4.0.2", + "webpack-dev-middleware": "^5.3.4", + "ws": "^8.13.0" + }, + "bin": { + "webpack-dev-server": "bin/webpack-dev-server.js" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.37.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "webpack": { + "optional": true + }, + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-dev-server/node_modules/ajv": { + "version": "8.13.0", + "resolved": "https://registry.npmmirror.com/ajv/-/ajv-8.13.0.tgz", + "integrity": "sha512-PRA911Blj99jR5RMeTunVbNXMF6Lp4vZXnk5GQjcnUWUTsrXtekg/pnmFFI2u/I36Y/2bITGS30GZCXei6uNkA==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.3", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.4.1" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/webpack-dev-server/node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/webpack-dev-server/node_modules/ipaddr.js": { + "version": "2.2.0", + "resolved": "https://registry.npmmirror.com/ipaddr.js/-/ipaddr.js-2.2.0.tgz", + "integrity": "sha512-Ag3wB2o37wslZS19hZqorUnrnzSkpOVy+IiiDEiTqNubEYpYuHWIf6K4psgN2ZWKExS4xhVCrRVfb/wfW8fWJA==", + "dev": true, + "engines": { + "node": ">= 10" + } + }, + "node_modules/webpack-dev-server/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "dev": true + }, + "node_modules/webpack-dev-server/node_modules/schema-utils": { + "version": "4.2.0", + "resolved": "https://registry.npmmirror.com/schema-utils/-/schema-utils-4.2.0.tgz", + "integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/webpack-dev-server/node_modules/ws": { + "version": "8.17.0", + "resolved": "https://registry.npmmirror.com/ws/-/ws-8.17.0.tgz", + "integrity": "sha512-uJq6108EgZMAl20KagGkzCKfMEjxmKvZHG7Tlq0Z6nOky7YF7aq4mOx6xK8TJ/i1LeK4Qus7INktacctDgY8Ow==", + "dev": true, + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/webpack-merge": { + "version": "5.10.0", + "resolved": 
"https://registry.npmmirror.com/webpack-merge/-/webpack-merge-5.10.0.tgz", + "integrity": "sha512-+4zXKdx7UnO+1jaN4l2lHVD+mFvnlZQP/6ljaJVb4SZiwIKeUnrT5l0gkT8z+n4hKpC+jpOv6O9R+gLtag7pSA==", + "dependencies": { + "clone-deep": "^4.0.1", + "flat": "^5.0.2", + "wildcard": "^2.0.0" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/webpack-sources": { + "version": "3.2.3", + "resolved": "https://registry.npmmirror.com/webpack-sources/-/webpack-sources-3.2.3.tgz", + "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/webpack-virtual-modules": { + "version": "0.4.6", + "resolved": "https://registry.npmmirror.com/webpack-virtual-modules/-/webpack-virtual-modules-0.4.6.tgz", + "integrity": "sha512-5tyDlKLqPfMqjT3Q9TAqf2YqjwmnUleZwzJi1A5qXnlBCdj2AtOJ6wAWdglTIDOPgOiOrXeBeFcsQ8+aGQ6QbA==", + "dev": true + }, + "node_modules/webpack/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmmirror.com/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/webpack/node_modules/tapable": { + "version": "2.2.1", + "resolved": "https://registry.npmmirror.com/tapable/-/tapable-2.2.1.tgz", + "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/websocket": { + "version": "1.0.35", + "resolved": "https://registry.npmmirror.com/websocket/-/websocket-1.0.35.tgz", + "integrity": "sha512-/REy6amwPZl44DDzvRCkaI1q1bIiQB0mEFQLUrhz3z2EK91cp3n72rAjUlrTP0zV22HJIUOVHQGPxhFRjxjt+Q==", + "dependencies": { + "bufferutil": "^4.0.1", + "debug": "^2.2.0", + "es5-ext": "^0.10.63", + "typedarray-to-buffer": "^3.1.5", + "utf-8-validate": "^5.0.2", + "yaeti": "^0.0.6" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/websocket-driver": { + "version": "0.7.4", + "resolved": "https://registry.npmmirror.com/websocket-driver/-/websocket-driver-0.7.4.tgz", + "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", + "dev": true, + "dependencies": { + "http-parser-js": ">=0.5.1", + "safe-buffer": ">=5.1.0", + "websocket-extensions": ">=0.1.1" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/websocket-extensions": { + "version": "0.1.4", + "resolved": "https://registry.npmmirror.com/websocket-extensions/-/websocket-extensions-0.1.4.tgz", + "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==", + "dev": true, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/websocket/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmmirror.com/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/websocket/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/ms/-/ms-2.0.0.tgz", + "integrity": 
"sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/whatwg-encoding": { + "version": "1.0.5", + "resolved": "https://registry.npmmirror.com/whatwg-encoding/-/whatwg-encoding-1.0.5.tgz", + "integrity": "sha512-b5lim54JOPN9HtzvK9HFXvBma/rnfFeqsic0hSpjtDbVxR3dJKLc+KB4V6GgiGOvl7CY/KNh8rxSo9DKQrnUEw==", + "dev": true, + "dependencies": { + "iconv-lite": "0.4.24" + } + }, + "node_modules/whatwg-fetch": { + "version": "3.6.20", + "resolved": "https://registry.npmmirror.com/whatwg-fetch/-/whatwg-fetch-3.6.20.tgz", + "integrity": "sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg==", + "dev": true + }, + "node_modules/whatwg-mimetype": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/whatwg-mimetype/-/whatwg-mimetype-3.0.0.tgz", + "integrity": "sha512-nt+N2dzIutVRxARx1nghPKGv1xHikU7HKdfafKkLNLindmPU/ch3U31NOCGGA/dmPcmb1VlofO0vnKAcsm0o/Q==", + "engines": { + "node": ">=12" + } + }, + "node_modules/whatwg-url": { + "version": "8.7.0", + "resolved": "https://registry.npmmirror.com/whatwg-url/-/whatwg-url-8.7.0.tgz", + "integrity": "sha512-gAojqb/m9Q8a5IV96E3fHJM70AzCkgt4uXYX2O7EmuyOnLrViCQlsEBmF9UQIu3/aeAIp2U17rtbpZWNntQqdg==", + "dev": true, + "dependencies": { + "lodash": "^4.7.0", + "tr46": "^2.1.0", + "webidl-conversions": "^6.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/which-typed-array": { + "version": "1.1.15", + "resolved": "https://registry.npmmirror.com/which-typed-array/-/which-typed-array-1.1.15.tgz", + "integrity": "sha512-oV0jmFtUky6CXfkqehVvBP/LSWJ2sy4vWMioiENyJLePrBO/yKyV9OyJySfAKosh+RYkIl5zJCNZ8/4JncrpdA==", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/wide-align": { + "version": "1.1.5", + "resolved": "https://registry.npmmirror.com/wide-align/-/wide-align-1.1.5.tgz", + "integrity": "sha512-eDMORYaPNZ4sQIuuYPDHdQvf4gyCF9rEEV/yPxGfwPkRodwEgiMUUXTx/dex+Me0wxx53S+NgUHaP7y3MGlDmg==", + "optional": true, + "dependencies": { + "string-width": "^1.0.2 || 2 || 3 || 4" + } + }, + "node_modules/widest-line": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/widest-line/-/widest-line-3.1.0.tgz", + "integrity": "sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg==", + "dependencies": { + "string-width": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wildcard": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/wildcard/-/wildcard-2.0.1.tgz", + "integrity": "sha512-CC1bOL87PIWSBhDcTrdeLo6eGT7mCFtrg0uIJtqJUFyK+eJnzl8A1niH56uu7KMa5XFrtiV+AQuHO3n7DsHnLQ==" + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmmirror.com/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "engines": { + "node": 
">=0.10.0" + } + }, + "node_modules/worker-farm": { + "version": "1.7.0", + "resolved": "https://registry.npmmirror.com/worker-farm/-/worker-farm-1.7.0.tgz", + "integrity": "sha512-rvw3QTZc8lAxyVrqcSGVm5yP/IJ2UcB3U0graE3LCFoZ0Yn2x4EoVSqJKdB/T5M+FLcRPjz4TDacRf3OCfNUzw==", + "peer": true, + "dependencies": { + "errno": "~0.1.7" + } + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmmirror.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://registry.npmmirror.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/wrap-ansi/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz", + "integrity": 
"sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + }, + "node_modules/write-file-atomic": { + "version": "3.0.3", + "resolved": "https://registry.npmmirror.com/write-file-atomic/-/write-file-atomic-3.0.3.tgz", + "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", + "dev": true, + "dependencies": { + "imurmurhash": "^0.1.4", + "is-typedarray": "^1.0.0", + "signal-exit": "^3.0.2", + "typedarray-to-buffer": "^3.1.5" + } + }, + "node_modules/ws": { + "version": "7.5.9", + "resolved": "https://registry.npmmirror.com/ws/-/ws-7.5.9.tgz", + "integrity": "sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q==", + "engines": { + "node": ">=8.3.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": "^5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/xml-name-validator": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/xml-name-validator/-/xml-name-validator-3.0.0.tgz", + "integrity": "sha512-A5CUptxDsvxKJEU3yO6DuWBSJz/qizqzJKOMIfUJHETbBw/sFaDxgd6fxm1ewUaM0jZ444Fc5vC5ROYurg/4Pw==", + "dev": true + }, + "node_modules/xml2js": { + "version": "0.5.0", + "resolved": "https://registry.npmmirror.com/xml2js/-/xml2js-0.5.0.tgz", + "integrity": "sha512-drPFnkQJik/O+uPKpqSgr22mpuFHqKdbS835iAQrUC73L2F5WkboIRd63ai/2Yg6I1jzifPFKH2NTK+cfglkIA==", + "dependencies": { + "sax": ">=0.6.0", + "xmlbuilder": "~11.0.0" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/xmlbuilder": { + "version": "11.0.1", + "resolved": "https://registry.npmmirror.com/xmlbuilder/-/xmlbuilder-11.0.1.tgz", + "integrity": "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA==", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/xmlchars": { + "version": "2.2.0", + "resolved": "https://registry.npmmirror.com/xmlchars/-/xmlchars-2.2.0.tgz", + "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==", + "dev": true + }, + "node_modules/xss": { + "version": "1.0.15", + "resolved": "https://registry.npmmirror.com/xss/-/xss-1.0.15.tgz", + "integrity": "sha512-FVdlVVC67WOIPvfOwhoMETV72f6GbW7aOabBC3WxN/oUdoEMDyLz4OgRv5/gck2ZeNqEQu+Tb0kloovXOfpYVg==", + "dependencies": { + "commander": "^2.20.3", + "cssfilter": "0.0.10" + }, + "bin": { + "xss": "bin/xss" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/xss/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmmirror.com/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" + }, + "node_modules/xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmmirror.com/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "engines": { + "node": ">=0.4" + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmmirror.com/y18n/-/y18n-5.0.8.tgz", + "integrity": 
"sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "engines": { + "node": ">=10" + } + }, + "node_modules/yaeti": { + "version": "0.0.6", + "resolved": "https://registry.npmmirror.com/yaeti/-/yaeti-0.0.6.tgz", + "integrity": "sha512-MvQa//+KcZCUkBTIC9blM+CU9J2GzuTytsOUwf2lidtvkx/6gnEp1QvJv34t9vdjhFmha/mUiNDbN0D0mJWdug==", + "engines": { + "node": ">=0.10.32" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmmirror.com/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" + }, + "node_modules/yaml": { + "version": "1.10.2", + "resolved": "https://registry.npmmirror.com/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, + "node_modules/yaml-front-matter": { + "version": "4.1.1", + "resolved": "https://registry.npmmirror.com/yaml-front-matter/-/yaml-front-matter-4.1.1.tgz", + "integrity": "sha512-ULGbghCLsN8Hs8vfExlqrJIe8Hl2TUjD7/zsIGMP8U+dgRXEsDXk4yydxeZJgdGiimP1XB7zhmhOB4/HyfqOyQ==", + "dependencies": { + "commander": "^6.2.0", + "js-yaml": "^3.14.1" + }, + "bin": { + "yaml-front-matter": "bin/js-yaml-front.js" + } + }, + "node_modules/yaml-front-matter/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmmirror.com/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/yaml-front-matter/node_modules/commander": { + "version": "6.2.1", + "resolved": "https://registry.npmmirror.com/commander/-/commander-6.2.1.tgz", + "integrity": "sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA==", + "engines": { + "node": ">= 6" + } + }, + "node_modules/yaml-front-matter/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmmirror.com/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/yargs": { + "version": "16.2.0", + "resolved": "https://registry.npmmirror.com/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-parser": { + "version": "20.2.9", + "resolved": "https://registry.npmmirror.com/yargs-parser/-/yargs-parser-20.2.9.tgz", + "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", + "engines": { + "node": ">=10" + } + }, + "node_modules/yauzl": { + "version": "2.10.0", + "resolved": "https://registry.npmmirror.com/yauzl/-/yauzl-2.10.0.tgz", + "integrity": "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==", + "dependencies": { + "buffer-crc32": "~0.2.3", + "fd-slicer": "~1.1.0" + } + }, + "node_modules/yocto-queue": { + "version": 
"0.1.0", + "resolved": "https://registry.npmmirror.com/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yorkie": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/yorkie/-/yorkie-2.0.0.tgz", + "integrity": "sha512-jcKpkthap6x63MB4TxwCyuIGkV0oYP/YRyuQU5UO0Yz/E/ZAu+653/uov+phdmO54n6BcvFRyyt0RRrWdN2mpw==", + "dev": true, + "hasInstallScript": true, + "dependencies": { + "execa": "^0.8.0", + "is-ci": "^1.0.10", + "normalize-path": "^1.0.0", + "strip-indent": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/yorkie/node_modules/cross-spawn": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/cross-spawn/-/cross-spawn-5.1.0.tgz", + "integrity": "sha512-pTgQJ5KC0d2hcY8eyL1IzlBPYjTkyH72XRZPnLyKus2mBfNjQs3klqbJU2VILqZryAZUt9JOb3h/mWMy23/f5A==", + "dev": true, + "dependencies": { + "lru-cache": "^4.0.1", + "shebang-command": "^1.2.0", + "which": "^1.2.9" + } + }, + "node_modules/yorkie/node_modules/execa": { + "version": "0.8.0", + "resolved": "https://registry.npmmirror.com/execa/-/execa-0.8.0.tgz", + "integrity": "sha512-zDWS+Rb1E8BlqqhALSt9kUhss8Qq4nN3iof3gsOdyINksElaPyNBtKUMTR62qhvgVWR0CqCX7sdnKe4MnUbFEA==", + "dev": true, + "dependencies": { + "cross-spawn": "^5.0.1", + "get-stream": "^3.0.0", + "is-stream": "^1.1.0", + "npm-run-path": "^2.0.0", + "p-finally": "^1.0.0", + "signal-exit": "^3.0.0", + "strip-eof": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/yorkie/node_modules/lru-cache": { + "version": "4.1.5", + "resolved": "https://registry.npmmirror.com/lru-cache/-/lru-cache-4.1.5.tgz", + "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", + "dev": true, + "dependencies": { + "pseudomap": "^1.0.2", + "yallist": "^2.1.2" + } + }, + "node_modules/yorkie/node_modules/normalize-path": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/normalize-path/-/normalize-path-1.0.0.tgz", + "integrity": "sha512-7WyT0w8jhpDStXRq5836AMmihQwq2nrUVQrgjvUo/p/NZf9uy/MeJ246lBJVmWuYXMlJuG9BNZHF0hWjfTbQUA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/yorkie/node_modules/shebang-command": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/shebang-command/-/shebang-command-1.2.0.tgz", + "integrity": "sha512-EV3L1+UQWGor21OmnvojK36mhg+TyIKDh3iFBKBohr5xeXIhNBcx8oWdgkTEEQ+BEFFYdLRuqMfd5L84N1V5Vg==", + "dev": true, + "dependencies": { + "shebang-regex": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/yorkie/node_modules/shebang-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/shebang-regex/-/shebang-regex-1.0.0.tgz", + "integrity": "sha512-wpoSFAxys6b2a2wHZ1XpDSgD7N9iVjg29Ph9uV/uaP9Ex/KXlkTZTeddxDPSYQpgvzKLGJke2UU0AzoGCjNIvQ==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/yorkie/node_modules/which": { + "version": "1.3.1", + "resolved": "https://registry.npmmirror.com/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "which": "bin/which" + } + }, + "node_modules/yorkie/node_modules/yallist": { + "version": "2.1.2", + "resolved": 
"https://registry.npmmirror.com/yallist/-/yallist-2.1.2.tgz", + "integrity": "sha512-ncTzHV7NvsQZkYe1DW7cbDLm0YpzHmZF5r/iyP3ZnQtMiJ+pjzisCiMNI+Sj+xQF5pXhSHxSB3uDbsBTzY/c2A==", + "dev": true + } + } +} diff --git a/ktransformers/website/package.json b/ktransformers/website/package.json new file mode 100644 index 0000000..8de2c2c --- /dev/null +++ b/ktransformers/website/package.json @@ -0,0 +1,61 @@ +{ + "name": "", + "version": "", + "private": true, + "scripts": { + "serve": "vue-cli-service serve", + "build": "vue-cli-service build", + "test:unit": "vue-cli-service test:unit", + "lint": "vue-cli-service lint" + }, + "dependencies": { + "@types/pdfjs-dist": "^2.10.378", + "@types/websocket": "^1.0.10", + "@vue/cli": "^5.0.8", + "ant-design-vue": "^4.2.1", + "apexcharts": "^3.49.1", + "axios": "^1.7.0", + "axios-extensions": "^3.1.6", + "better-scroll": "^2.5.1", + "element-plus": "^2.7.3", + "marked": "^12.0.2", + "marked-highlight": "^2.1.1", + "pdf-lib": "^1.17.1", + "pdfobject": "^2.3.0", + "v-clipboard": "^3.0.0-next.1", + "vue": "^3.4.27", + "vue-i18n": "^9.13.1", + "vue-pdf": "^4.3.0", + "vue-router": "^4.0.3", + "vue3-apexcharts": "^1.5.3", + "vuex": "^4.0.0", + "webpack": "^5.91.0", + "webpack-cli": "^5.1.4", + "websocket": "^1.0.35" + }, + "devDependencies": { + "@types/jest": "^27.0.1", + "@types/pdfobject": "^2.2.5", + "@typescript-eslint/eslint-plugin": "^5.4.0", + "@typescript-eslint/parser": "^5.4.0", + "@vue/cli-plugin-eslint": "~5.0.0", + "@vue/cli-plugin-router": "~5.0.0", + "@vue/cli-plugin-typescript": "~5.0.0", + "@vue/cli-plugin-unit-jest": "~5.0.0", + "@vue/cli-plugin-vuex": "~5.0.0", + "@vue/cli-service": "~5.0.0", + "@vue/eslint-config-typescript": "^9.1.0", + "@vue/test-utils": "^2.0.0-0", + "@vue/vue3-jest": "^27.0.0-alpha.1", + "babel-jest": "^27.0.6", + "eslint": "^7.32.0", + "eslint-plugin-vue": "^8.0.3", + "jest": "^27.0.5", + "stylus": "^0.55.0", + "stylus-loader": "^6.1.0", + "ts-jest": "^27.0.4", + "typescript": "~4.5.5" + }, + "_id": "@", + "readme": "ERROR: No README data found!" 
+} diff --git a/ktransformers/website/public/balck.ico b/ktransformers/website/public/balck.ico new file mode 100644 index 0000000..56091e6 Binary files /dev/null and b/ktransformers/website/public/balck.ico differ diff --git a/ktransformers/website/public/config.js b/ktransformers/website/public/config.js new file mode 100644 index 0000000..7a683c7 --- /dev/null +++ b/ktransformers/website/public/config.js @@ -0,0 +1,4 @@ +window.configWeb = { + apiUrl: 'http://119.255.238.12:15670/v1', + port: 8080, + }; \ No newline at end of file diff --git a/ktransformers/website/public/css/reset.css b/ktransformers/website/public/css/reset.css new file mode 100644 index 0000000..c88687a --- /dev/null +++ b/ktransformers/website/public/css/reset.css @@ -0,0 +1,67 @@ +html, body, div, span, applet, object, iframe, +h1, h2, h3, h4, h5, h6, p, blockquote, pre, +a, abbr, acronym, address, big, cite, code, +del, dfn, em, img, ins, kbd, q, s, samp, +small, strike, strong, sub, sup, tt, var, +b, u, i, center, +dl, dt, dd, ol, ul, li, +fieldset, form, label, legend,textarea, +table, caption, tbody, tfoot, thead, tr, th, td, +article, aside, canvas, details, embed, +figure, figcaption, footer, header, hgroup, +menu, nav, output, ruby, section, summary, +time, mark, audio, video { + margin: 0; + padding: 0; + border: 0; + font-size: 100%; + *font: inherit; + font-family: Arial, Microsoft YaHei, SimHei, Tahoma, sans-serif !important; + vertical-align: baseline; +} +/* HTML5 display-role reset for older browsers */ +article, aside, details, figcaption, figure, +footer, header, hgroup, menu, nav, section { + display: block; +} +body { + line-height: 1; + -webkit-text-size-adjust: 100%!important; + margin: 0; +} +html,body { + height: 100%; + width: 100%; + overflow: hidden; +} +ol, ul { + list-style: none; +} +blockquote, q { + quotes: none; +} +blockquote:before, blockquote:after, +q:before, q:after { + content: ''; + content: none; +} +table { + border-collapse: collapse; + border-spacing: 0; +} + +.clearfix:before, +.clearfix:after { + content:""; + display:table +} +.clearfix:after { + clear:both +} + +/*ๆ˜พ็คบ็œ็•ฅๅท*/ +.ellipsis{ + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} diff --git a/ktransformers/website/public/images/assistant-avatar.png b/ktransformers/website/public/images/assistant-avatar.png new file mode 100644 index 0000000..1142540 Binary files /dev/null and b/ktransformers/website/public/images/assistant-avatar.png differ diff --git a/ktransformers/website/public/images/avatar.png b/ktransformers/website/public/images/avatar.png new file mode 100644 index 0000000..f11513a Binary files /dev/null and b/ktransformers/website/public/images/avatar.png differ diff --git a/ktransformers/website/public/images/bgbg.png b/ktransformers/website/public/images/bgbg.png new file mode 100644 index 0000000..39a084a Binary files /dev/null and b/ktransformers/website/public/images/bgbg.png differ diff --git a/ktransformers/website/public/images/logo.ico b/ktransformers/website/public/images/logo.ico new file mode 100644 index 0000000..87d6037 Binary files /dev/null and b/ktransformers/website/public/images/logo.ico differ diff --git a/ktransformers/website/public/images/logo.png b/ktransformers/website/public/images/logo.png new file mode 100644 index 0000000..68dc683 Binary files /dev/null and b/ktransformers/website/public/images/logo.png differ diff --git a/ktransformers/website/public/images/three.png b/ktransformers/website/public/images/three.png new file mode 100644 index 
0000000..d36e188 Binary files /dev/null and b/ktransformers/website/public/images/three.png differ diff --git a/ktransformers/website/public/images/user-filling.png b/ktransformers/website/public/images/user-filling.png new file mode 100644 index 0000000..92b1f52 Binary files /dev/null and b/ktransformers/website/public/images/user-filling.png differ diff --git a/ktransformers/website/public/index.html b/ktransformers/website/public/index.html new file mode 100644 index 0000000..6bc7877 --- /dev/null +++ b/ktransformers/website/public/index.html @@ -0,0 +1,19 @@ + + + + + + + + + + KTransformers + + + +
+ + + diff --git a/ktransformers/website/src/App.vue b/ktransformers/website/src/App.vue new file mode 100644 index 0000000..1fd8ccf --- /dev/null +++ b/ktransformers/website/src/App.vue @@ -0,0 +1,19 @@ + + + + + \ No newline at end of file diff --git a/ktransformers/website/src/api/api-client.ts b/ktransformers/website/src/api/api-client.ts new file mode 100644 index 0000000..6d628ce --- /dev/null +++ b/ktransformers/website/src/api/api-client.ts @@ -0,0 +1,11 @@ +import axios, { AxiosInstance } from 'axios'; +import {baseURL} from '@/conf/config'; +const apiClient: AxiosInstance = axios.create({ + baseURL: baseURL, + // baseURL: '/api', + headers: { + 'Content-Type': 'application/json', + }, + withCredentials: true, +}); +export default apiClient; diff --git a/ktransformers/website/src/api/assistant.ts b/ktransformers/website/src/api/assistant.ts new file mode 100644 index 0000000..e0f2f8b --- /dev/null +++ b/ktransformers/website/src/api/assistant.ts @@ -0,0 +1,184 @@ +import apiClient from './api-client'; +import { IAssistant,IDeleteResult, IAssistantWithStatus } from '../utils/types'; +function filterAndConvert( + assistantsWithStatus: IAssistantWithStatus[], + statusCondition: string + ): IAssistant[] { + return assistantsWithStatus + .filter((assistant) => assistant.build_status.status === statusCondition) + .map(({ build_status, ...rest }) => rest); + } + +interface IAssistantData { + model: string; + prefix_system_prompt?: string; + suffix_system_prompt?: string; + name?: string; + description?: string; + tools?: any[]; + tool_resources?: object; + metadata?:{[key:string]:any} + top_p?: number; + temperature?: number; + response_format?: string; + instructions?: string; +} + +export const createAssistant = async (data: IAssistantData): Promise => { + const assistant_data: { + model: string; + instructions?: string; + name?: string; + description?: string; + tools?: any[]; + tool_resources?: object; + metadata?:{[key:string]:any} + top_p?: number; + temperature?: number; + response_format?: string; + } = { + model: data.model + }; + + if (data.prefix_system_prompt) { + assistant_data.instructions = data.prefix_system_prompt; + } + if (data.suffix_system_prompt) { + assistant_data.instructions = data.suffix_system_prompt; + } + if (data.name) { + assistant_data.name = data.name; + } + if (data.description) { + assistant_data.description = data.description; + } + if (data.tools) { + assistant_data.tools = data.tools; + } + if (data.tool_resources) { + assistant_data.tool_resources = data.tool_resources; + } + if (data.metadata) { + assistant_data.metadata = data.metadata + } + if (typeof data.top_p !== 'undefined') { + assistant_data.top_p = data.top_p; + } + if (typeof data.temperature !== 'undefined') { + assistant_data.temperature = data.temperature; + } + if (data.response_format) { + assistant_data.response_format = data.response_format; + } + if (data.instructions) { + assistant_data.instructions = data.instructions; + } + console.log(assistant_data) + const response = await apiClient.post( + '/assistants/', + assistant_data + ); + console.log("response", response) + return response.data; +}; + + +export const listAssistants = async ( + limit?: number, + order?: string, + after?: string, + before?: string, + run_id?: string, +): Promise => { + const params: { + limit?: number, + order?: string, + after?: string, + before?: string, + run_id?: string + } = {}; + + if (typeof limit !== 'undefined') { + params.limit = limit; + } + if (typeof order !== 'undefined') { + params.order 
= order; + } + if (typeof after !== 'undefined') { + params.after = after; + } + if (typeof before !== 'undefined') { + params.before = before; + } + if (typeof run_id !== 'undefined') { + params.run_id = run_id; + } + const response = await apiClient.get('/assistants/status', { + params + }); + let tmp = response.data + let result = [] as IAssistant[] + const filteredAssistants = filterAndConvert(tmp, 'completed'); + return filteredAssistants +}; + +export const getAssistant = async ( + assistant_id: string +): Promise => { + const response = await apiClient.get(`/assistants/${assistant_id}`); + return response.data; +} + +export const deleteAssistant = async ( + assistant_id: string +): Promise => { + const response = await apiClient.delete(`/assistants/${assistant_id}`); + return response.data; +} + +export const getRelatedThreadId = async ( + assistant_id: string +): Promise => { + const response = await apiClient.get(`/assistants/${assistant_id}/related_thread`); + return response.data; +} + +export const listAssistantsWithStatus = async ( + limit?: number, + order?: string, + after?: string, + before?: string, + run_id?: string, +): Promise => { + const params: { + limit?: number, + order?: string, + after?: string, + before?: string, + run_id?: string + } = {}; + + if (typeof limit !== 'undefined') { + params.limit = limit; + } + if (typeof order !== 'undefined') { + params.order = order; + } + if (typeof after !== 'undefined') { + params.after = after; + } + if (typeof before !== 'undefined') { + params.before = before; + } + if (typeof run_id !== 'undefined') { + params.run_id = run_id; + } + console.log(params) + const response = await apiClient.get('/assistants/status', { + params + }); + + return response.data; +}; + + diff --git a/ktransformers/website/src/api/message.ts b/ktransformers/website/src/api/message.ts new file mode 100644 index 0000000..4c91128 --- /dev/null +++ b/ktransformers/website/src/api/message.ts @@ -0,0 +1,75 @@ +import apiClient from './api-client'; +import { IMessage,IDeleteResult } from '../utils/types'; + +export const createMessage = async ( + thread_id: string, + content: string, + role?: string, + attachments?: any[], + metadata?:{[key:string]:any} +): Promise => { + const message_data: { + content: string; + role?: string; + attachments?: any[]; + metadata?:{[key:string]:any} + } = { + content, + }; + + if (metadata) { + message_data.metadata = metadata; + } + if (role) { + message_data.role = role; + } + if (attachments) { + message_data.attachments = attachments; + } + const response = await apiClient.post(`/threads/${thread_id}/messages`, message_data); + return response.data; +}; + + +export const listMessages = async ( + thread_id: string, + limit?: number, + order?: string, + after?: string, + before?: string, + run_id?: string, +): Promise => { + const params: { + limit?: number, + order?: string, + after?: string, + before?: string, + run_id?: string + } = {}; + + if (typeof limit !== 'undefined') { + params.limit = limit; + } + if (typeof order !== 'undefined') { + params.order = order; + } + if (typeof after !== 'undefined') { + params.after = after; + } + if (typeof before !== 'undefined') { + params.before = before; + } + if (typeof run_id !== 'undefined') { + params.run_id = run_id; + } + + const response = await apiClient.get(`/threads/${thread_id}/messages`, { + params + }); + + return response.data; +}; +export const deleteMessage = async(thread_id:string, message_id:string): Promise => { + const response = await 
apiClient.delete(`/threads/${thread_id}/messages/${message_id}`); + return response.data; +} diff --git a/ktransformers/website/src/api/run.ts b/ktransformers/website/src/api/run.ts new file mode 100644 index 0000000..a21fab9 --- /dev/null +++ b/ktransformers/website/src/api/run.ts @@ -0,0 +1,106 @@ +import apiClient from './api-client'; +import { IRun } from '../utils/types'; +import {baseURL} from '@/conf/config'; +interface IRunData { + assistant_id: string; + model?: string; + instructions?: string; + additional_instructions?: string; + additional_messages?: any[]; + tools?: any[]; + metadata?: { [key: string]: any } + temperature?: number; + top_p?: number; + stream?: boolean; + max_prompt_tokens?: number; + max_completion_tokens?: number; + truncation_strategy?: object; + tool_choice?: string; + response_format?: string | object; +} + + +export async function* createRun( + data: IRunData, + thread_id: string +): AsyncGenerator { + const run_data = { + ...data, + assistant_id: data.assistant_id, + }; + + const response = await fetch(`${baseURL}/threads/${thread_id}/runs`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(run_data), + }); + + if (!response.ok) { + throw new Error(`HTTP error! status: ${response.status}`); + } + + if (!response.body) { + throw new Error('Response body is missing'); + } + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ''; + try { + while (true) { + const { done, value } = await reader.read(); + if (done) return; + buffer += decoder.decode(value, { stream: true }); + + let eventIndex = buffer.indexOf("\n\n"); + while (eventIndex !== -1) { + const event = buffer.slice(0, eventIndex); + buffer = buffer.slice(eventIndex + 2); + if (event.startsWith("event: thread.run.created")) { + const dataIndex = event.indexOf("data: "); + if (dataIndex !== -1) { + const datads = event.slice(39, 75) + yield datads; + } + } else if (event.startsWith("event: thread.message.delta")) { + const dataIndex = event.indexOf("data: "); + if (dataIndex !== -1) { + const data = JSON.parse(event.slice(dataIndex + 6)); + yield data.delta.content[0].text.value || ''; + } + } else if (event.startsWith("event: done")) { + return; + } + + eventIndex = buffer.indexOf("\n\n"); + } + } + } catch (e) { + + console.error('An error occurred while reading the response stream:', e); + // throw e; + return e + } +} +// ๅฎšไน‰ๅ–ๆถˆ่ฟ่กŒ็š„ๅ‡ฝๆ•ฐ +export async function cancelRun(threadId: string, runId: string){ + const run_data = { + thread_id:threadId, + run_id:runId, + }; + try { + const response = await fetch(`${baseURL}/threads/${threadId}/runs/${runId}/cancel`, { + method: 'POST', + }); + + if (!response.ok) { + throw new Error(`HTTP error! 
status: ${response.status}`); + } + + return response; + } catch (error) { + console.error('An error occurred while cancelling the run:', error); + throw error; + } +} \ No newline at end of file diff --git a/ktransformers/website/src/api/thread.ts b/ktransformers/website/src/api/thread.ts new file mode 100644 index 0000000..086571d --- /dev/null +++ b/ktransformers/website/src/api/thread.ts @@ -0,0 +1,48 @@ +import apiClient from './api-client'; +import { IThread, IMessage, IThreadAndMessageAndAssistant, IDeleteResult } from '../utils/types'; +export const createThread = async ( + message?: IMessage, + tool_resources?: object, + metadata?: { [key: string]: any } +): Promise => { + const thread_data: { message?: object, metadata?: { [key: string]: any } } = {}; + if (message) { + thread_data.message = message; + } + if (metadata) { + thread_data.metadata = metadata; + } + const response = await apiClient.post( + '/threads', + thread_data); + return response.data; +}; + +export const listThreads = async ( + limit?: number, + order?: string, +): Promise => { + const params: { + limit?: number, + order?: string, + } = { limit, order }; + const response = await apiClient.get('/threads', { + params + }); + + return response.data; +}; + +export const deleteThread = async ( + thread_id: string +): Promise => { + const response = await apiClient.delete(`/threads/${thread_id}`); + return response.data; +} + +export const getThread = async ( + thread_id: string +): Promise => { + const response = await apiClient.get(`/threads/${thread_id}`); + return response.data; +} \ No newline at end of file diff --git a/ktransformers/website/src/assets/css/mixins.styl b/ktransformers/website/src/assets/css/mixins.styl new file mode 100644 index 0000000..24039bd --- /dev/null +++ b/ktransformers/website/src/assets/css/mixins.styl @@ -0,0 +1,161 @@ + +/*Define color variables*/ +$bg_gray_light_normal = #F9F9F9 +$bg_gray_light_hover = #E8E8E8 +$bg_gray_light_active = #E8E8E8 + +$border_gray_light_normal = rgba(0, 0, 0, .15) +$border_gray_light_hover = #8080FF + +$gray_20 = #333333 +$gray_40 = #585858 +$gray_50 = #7F7F7F +$gray_60 = #9F9F9F +$gray_70 = #BFBFBF +$gray_80 = #DFDFDF +$gray_85 = #F2F2F2 +$gray_90 = #F7F7F7 + +$gray = #53525B +$gray_dark = #42414a +$gray_hover = #121212 +$gray_action = #6C757D + +$primary = #409eff +$primary_hover = #428bca +$primary_middle = #9DDDF9 +$primary_light = #D4F0FC + +$cyan = #66CCCC +$cyan_hover = #46C2C2 + + +/*Define common modules*/ +$input-duration = .25s +input-border() + -webkit-transition: border-color ease-in-out $input-duration,-webkit-box-shadow ease-in-out $input-duration + -o-transition: border-color ease-in-out $input-duration,box-shadow ease-in-out $input-duration + transition: border-color ease-in-out $input-duration,box-shadow ease-in-out $input-duration +input-focus() + border-color: #66afe9 + outline: 0 + z-index: 100 + -webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6) + box-shadow: inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6) + + +/*Define common class*/ +.flex-column + display: -webkit-box + display: -webkit-flex + display: flex + box-sizing: border-box + -webkit-box-orient: vertical + -webkit-box-direction: normal + -webkit-flex-direction: column + flex-direction: column + height: 100% + +.flex-row + position: relative + display: -webkit-box + display: -ms-flexbox + display: flex + box-sizing: border-box + -webkit-box-align: center + -ms-flex-align: center + align-items: center + +.flex-unit + 
-webkit-box-flex: 1 + -ms-flex: 1 + flex: 1 + // overflow: hidden + +.clearfix + &:after + clear: both + content: "\20" + display: block + height: 0 + visibility: hidden + +a,a:hover + text-decoration:none + +button:focus + outline: none + +.btn + display: inline-block + margin-bottom: 0 + padding:0px 15px + font-size: 14px + height: 34px + line-height: 32px + float: left /*ๅŽปๆމinline-blockไน‹้—ด็š„็ฉบๆ ผ*/ + font-weight: normal + text-align: center + white-space: nowrap + vertical-align: middle + cursor: pointer + background-image: none + border-radius: 3px + -webkit-user-select: none + -moz-user-select: none + -ms-user-select: none + -o-user-select: none + user-select: none + &:hover + .dropdown-list + display: block + i + font-size: 16px + .text + float: right + margin-left: 3px + +.btn-gray + color: $gray_action + background-color: #FFFFFF + border: 1px solid $gray_action + &:not(.is-disabled):hover + color: #FFFFFF + background-color: $gray_action + border: 1px solid $gray_action + +.btn-primary + color: #FFFFFF + background-color: $primary + border: 1px solid $primary + &:not(.is-disabled):hover + color: #FFFFFF + background-color: $primary_hover + border: 1px solid $primary_hover + +.chat-box + position: relative + .chat-input + border: 1px solid $border_gray_light_normal + height: 48px + line-height: 48px + font-size: 16px + outline: 0 + box-sizing: border-box + padding:0 30px0 20px + color: #7F7F7F + width: 800px + border-radius: 12px + position: relative + &:focus + input-focus() + i + position: absolute + font-size: 26px + right: 13px + bottom:0px + color: $border_gray_light_normal + z-index: 100 + cursor: pointer + &:hover + color: $border_gray_light_hover diff --git a/ktransformers/website/src/assets/iconfont/demo.css b/ktransformers/website/src/assets/iconfont/demo.css new file mode 100644 index 0000000..4fe0312 --- /dev/null +++ b/ktransformers/website/src/assets/iconfont/demo.css @@ -0,0 +1,539 @@ +/* Logo ๅญ—ไฝ“ */ +@font-face { + font-family: "iconfont logo"; + src: url('https://at.alicdn.com/t/font_985780_km7mi63cihi.eot?t=1545807318834'); + src: url('https://at.alicdn.com/t/font_985780_km7mi63cihi.eot?t=1545807318834#iefix') format('embedded-opentype'), + url('https://at.alicdn.com/t/font_985780_km7mi63cihi.woff?t=1545807318834') format('woff'), + url('https://at.alicdn.com/t/font_985780_km7mi63cihi.ttf?t=1545807318834') format('truetype'), + url('https://at.alicdn.com/t/font_985780_km7mi63cihi.svg?t=1545807318834#iconfont') format('svg'); +} + +.logo { + font-family: "iconfont logo"; + font-size: 160px; + font-style: normal; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; +} + +/* tabs */ +.nav-tabs { + position: relative; +} + +.nav-tabs .nav-more { + position: absolute; + right: 0; + bottom: 0; + height: 42px; + line-height: 42px; + color: #666; +} + +#tabs { + border-bottom: 1px solid #eee; +} + +#tabs li { + cursor: pointer; + width: 100px; + height: 40px; + line-height: 40px; + text-align: center; + font-size: 16px; + border-bottom: 2px solid transparent; + position: relative; + z-index: 1; + margin-bottom: -1px; + color: #666; +} + + +#tabs .active { + border-bottom-color: #f00; + color: #222; +} + +.tab-container .content { + display: none; +} + +/* ้กต้ขๅธƒๅฑ€ */ +.main { + padding: 30px 100px; + width: 960px; + margin: 0 auto; +} + +.main .logo { + color: #333; + text-align: left; + margin-bottom: 30px; + line-height: 1; + height: 110px; + margin-top: -50px; + overflow: hidden; + *zoom: 1; +} + +.main .logo a { + font-size: 
160px; + color: #333; +} + +.helps { + margin-top: 40px; +} + +.helps pre { + padding: 20px; + margin: 10px 0; + border: solid 1px #e7e1cd; + background-color: #fffdef; + overflow: auto; +} + +.icon_lists { + width: 100% !important; + overflow: hidden; + *zoom: 1; +} + +.icon_lists li { + width: 100px; + margin-bottom: 10px; + margin-right: 20px; + text-align: center; + list-style: none !important; + cursor: default; +} + +.icon_lists li .code-name { + line-height: 1.2; +} + +.icon_lists .icon { + display: block; + height: 100px; + line-height: 100px; + font-size: 42px; + margin: 10px auto; + color: #333; + -webkit-transition: font-size 0.25s linear, width 0.25s linear; + -moz-transition: font-size 0.25s linear, width 0.25s linear; + transition: font-size 0.25s linear, width 0.25s linear; +} + +.icon_lists .icon:hover { + font-size: 100px; +} + +.icon_lists .svg-icon { + /* ้€š่ฟ‡่ฎพ็ฝฎ font-size ๆฅๆ”นๅ˜ๅ›พๆ ‡ๅคงๅฐ */ + width: 1em; + /* ๅ›พๆ ‡ๅ’Œๆ–‡ๅญ—็›ธ้‚ปๆ—ถ๏ผŒๅž‚็›ดๅฏน้ฝ */ + vertical-align: -0.15em; + /* ้€š่ฟ‡่ฎพ็ฝฎ color ๆฅๆ”นๅ˜ SVG ็š„้ขœ่‰ฒ/fill */ + fill: currentColor; + /* path ๅ’Œ stroke ๆบขๅ‡บ viewBox ้ƒจๅˆ†ๅœจ IE ไธ‹ไผšๆ˜พ็คบ + normalize.css ไธญไนŸๅŒ…ๅซ่ฟ™่กŒ */ + overflow: hidden; +} + +.icon_lists li .name, +.icon_lists li .code-name { + color: #666; +} + +/* markdown ๆ ทๅผ */ +.markdown { + color: #666; + font-size: 14px; + line-height: 1.8; +} + +.highlight { + line-height: 1.5; +} + +.markdown img { + vertical-align: middle; + max-width: 100%; +} + +.markdown h1 { + color: #404040; + font-weight: 500; + line-height: 40px; + margin-bottom: 24px; +} + +.markdown h2, +.markdown h3, +.markdown h4, +.markdown h5, +.markdown h6 { + color: #404040; + margin: 1.6em 0 0.6em 0; + font-weight: 500; + clear: both; +} + +.markdown h1 { + font-size: 28px; +} + +.markdown h2 { + font-size: 22px; +} + +.markdown h3 { + font-size: 16px; +} + +.markdown h4 { + font-size: 14px; +} + +.markdown h5 { + font-size: 12px; +} + +.markdown h6 { + font-size: 12px; +} + +.markdown hr { + height: 1px; + border: 0; + background: #e9e9e9; + margin: 16px 0; + clear: both; +} + +.markdown p { + margin: 1em 0; +} + +.markdown>p, +.markdown>blockquote, +.markdown>.highlight, +.markdown>ol, +.markdown>ul { + width: 80%; +} + +.markdown ul>li { + list-style: circle; +} + +.markdown>ul li, +.markdown blockquote ul>li { + margin-left: 20px; + padding-left: 4px; +} + +.markdown>ul li p, +.markdown>ol li p { + margin: 0.6em 0; +} + +.markdown ol>li { + list-style: decimal; +} + +.markdown>ol li, +.markdown blockquote ol>li { + margin-left: 20px; + padding-left: 4px; +} + +.markdown code { + margin: 0 3px; + padding: 0 5px; + background: #eee; + border-radius: 3px; +} + +.markdown strong, +.markdown b { + font-weight: 600; +} + +.markdown>table { + border-collapse: collapse; + border-spacing:0; + empty-cells: show; + border: 1px solid #e9e9e9; + width: 95%; + margin-bottom: 24px; +} + +.markdown>table th { + white-space: nowrap; + color: #333; + font-weight: 600; +} + +.markdown>table th, +.markdown>table td { + border: 1px solid #e9e9e9; + padding: 8px 16px; + text-align: left; +} + +.markdown>table th { + background: #F7F7F7; +} + +.markdown blockquote { + font-size: 90%; + color: #999; + border-left: 4px solid #e9e9e9; + padding-left: 0.8em; + margin: 1em 0; +} + +.markdown blockquote p { + margin: 0; +} + +.markdown .anchor { + opacity: 0; + transition: opacity 0.3s ease; + margin-left: 8px; +} + +.markdown .waiting { + color: #ccc; +} + +.markdown h1:hover .anchor, +.markdown h2:hover .anchor, 
+.markdown h3:hover .anchor, +.markdown h4:hover .anchor, +.markdown h5:hover .anchor, +.markdown h6:hover .anchor { + opacity: 1; + display: inline-block; +} + +.markdown>br, +.markdown>p>br { + clear: both; +} + + +.hljs { + display: block; + background: white; + padding: 0.5em; + color: #333333; + overflow-x: auto; +} + +.hljs-comment, +.hljs-meta { + color: #969896; +} + +.hljs-string, +.hljs-variable, +.hljs-template-variable, +.hljs-strong, +.hljs-emphasis, +.hljs-quote { + color: #df5000; +} + +.hljs-keyword, +.hljs-selector-tag, +.hljs-type { + color: #a71d5d; +} + +.hljs-literal, +.hljs-symbol, +.hljs-bullet, +.hljs-attribute { + color: #0086b3; +} + +.hljs-section, +.hljs-name { + color: #63a35c; +} + +.hljs-tag { + color: #333333; +} + +.hljs-title, +.hljs-attr, +.hljs-selector-id, +.hljs-selector-class, +.hljs-selector-attr, +.hljs-selector-pseudo { + color: #795da3; +} + +.hljs-addition { + color: #55a532; + background-color: #eaffea; +} + +.hljs-deletion { + color: #bd2c00; + background-color: #ffecec; +} + +.hljs-link { + text-decoration: underline; +} + +/* ไปฃ็ ้ซ˜ไบฎ */ +/* PrismJS 1.15.0 +https://prismjs.com/download.html#themes=prism&languages=markup+css+clike+javascript */ +/** + * prism.js default theme for JavaScript, CSS and HTML + * Based on dabblet (http://dabblet.com) + * @author Lea Verou + */ +code[class*="language-"], +pre[class*="language-"] { + color: black; + background: none; + text-shadow: 0 1px white; + font-family: Consolas, Monaco, 'Andale Mono', 'Ubuntu Mono', monospace; + text-align: left; + white-space: pre; + word-spacing: normal; + word-break: normal; + word-wrap: normal; + line-height: 1.5; + + -moz-tab-size: 4; + -o-tab-size: 4; + tab-size: 4; + + -webkit-hyphens: none; + -moz-hyphens: none; + -ms-hyphens: none; + hyphens: none; +} + +pre[class*="language-"]::-moz-selection, +pre[class*="language-"] ::-moz-selection, +code[class*="language-"]::-moz-selection, +code[class*="language-"] ::-moz-selection { + text-shadow: none; + background: #b3d4fc; +} + +pre[class*="language-"]::selection, +pre[class*="language-"] ::selection, +code[class*="language-"]::selection, +code[class*="language-"] ::selection { + text-shadow: none; + background: #b3d4fc; +} + +@media print { + + code[class*="language-"], + pre[class*="language-"] { + text-shadow: none; + } +} + +/* Code blocks */ +pre[class*="language-"] { + padding: 1em; + margin: .5em 0; + overflow: auto; +} + +:not(pre)>code[class*="language-"], +pre[class*="language-"] { + background: #f5f2f0; +} + +/* Inline code */ +:not(pre)>code[class*="language-"] { + padding: .1em; + border-radius: .3em; + white-space: normal; +} + +.token.comment, +.token.prolog, +.token.doctype, +.token.cdata { + color: slategray; +} + +.token.punctuation { + color: #999; +} + +.namespace { + opacity: .7; +} + +.token.property, +.token.tag, +.token.boolean, +.token.number, +.token.constant, +.token.symbol, +.token.deleted { + color: #905; +} + +.token.selector, +.token.attr-name, +.token.string, +.token.char, +.token.builtin, +.token.inserted { + color: #690; +} + +.token.operator, +.token.entity, +.token.url, +.language-css .token.string, +.style .token.string { + color: #9a6e3a; + background: hsla(0, 0%, 100%, .5); +} + +.token.atrule, +.token.attr-value, +.token.keyword { + color: #07a; +} + +.token.function, +.token.class-name { + color: #DD4A68; +} + +.token.regex, +.token.important, +.token.variable { + color: #e90; +} + +.token.important, +.token.bold { + font-weight: bold; +} + +.token.italic { + font-style: italic; 
+} + +.token.entity { + cursor: help; +} diff --git a/ktransformers/website/src/assets/iconfont/demo_index.html b/ktransformers/website/src/assets/iconfont/demo_index.html new file mode 100644 index 0000000..64a3f85 --- /dev/null +++ b/ktransformers/website/src/assets/iconfont/demo_index.html @@ -0,0 +1,557 @@ + + + + + iconfont Demo + + + + + + + + + + + + + +
+ [demo_index.html renders the standard iconfont demo page; its HTML markup was lost in extraction, so only
+ the recoverable page text is reproduced here. The page lists the project's 16 icons under three tabs:
+ Unicode, font-class, and Symbol. The full glyph set (names translated from Chinese; codes match
+ iconfont.json below) is:
+
+   Copy           .icon-copy            &#xe8b0;
+   Arrow down     .icon-arrow-down      &#xe85e;
+   Progress       .icon-usage-progress  &#xe651;
+   Ring progress  .icon-gen-progress    &#xe617;
+   Back           .icon-back            &#xe779;
+   Dot            .icon-point           &#xe608;
+   Edit           .icon-edit            &#xe7dd;
+   Delete         .icon-delete          &#xe614;
+   Upload         .icon-upload-1        &#xe618;
+   Explore        .icon-explore         &#xe621;
+   Ellipsis       .icon-ellipsis        &#xe657;
+   Send           .icon-sent            &#xe60c;
+   List           .icon-list-list       &#xe62d;
+   List icon      .icon-list-icon       &#xe639;
+   Retry          .icon-zhongshi        &#xe6bd;
+   Fork log       .icon-log             &#xe826;]
+
+ Unicode reference
+ Unicode is the most primitive way of using an icon font on the web. Its characteristics:
+
+   • The icon can be restyled dynamically exactly like text: font-size, color, and so on.
+   • Multicolor is not supported by default; a multicolor icon added this way is automatically
+     flattened to a single color.
+
+ Note: the current iconfont platform supports two ways of referencing multicolor icons: the SVG symbol
+ mode and the colored-font-icon mode. (Colored font icons require enabling the "colored" option under
+ "Edit project" and regenerating the font.)
+
+ Unicode usage steps:
+
+ Step 1: copy the @font-face generated for your project
+
+ @font-face {
+   font-family: 'iconfont';
+   src: url('iconfont.woff2?t=1717950820214') format('woff2'),
+        url('iconfont.woff?t=1717950820214') format('woff'),
+        url('iconfont.ttf?t=1717950820214') format('truetype'),
+        url('iconfont.svg?t=1717950820214#iconfont') format('svg');
+ }
+
+ Step 2: define the style that applies the iconfont face
+
+ .iconfont {
+   font-family: "iconfont" !important;
+   font-size: 16px;
+   font-style: normal;
+   -webkit-font-smoothing: antialiased;
+   -moz-osx-font-smoothing: grayscale;
+ }
+
+ Step 3: pick an icon, take its character code, and use it in the page
+
+ <span class="iconfont">&#x33;</span>
+
+ "iconfont" is the font-family of your project; you can check it under "Edit project", and the default
+ is "iconfont".
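The same Unicode mode can also be driven from the web UI's TypeScript. The following is a minimal sketch, not part of the original page: the makeUnicodeIcon helper is hypothetical, it assumes the .iconfont class from Step 2 is already loaded, and 0xe8b0 is the Copy glyph recorded in iconfont.json.

// Minimal sketch (hypothetical helper): render a glyph via the Unicode mode.
// Assumes the .iconfont CSS class from Step 2 above is already on the page.
function makeUnicodeIcon(codepoint: number): HTMLSpanElement {
  const span = document.createElement("span");
  span.className = "iconfont";
  // The glyph lives in the Private Use Area; emit it as a single character.
  span.textContent = String.fromCodePoint(codepoint);
  return span;
}

// 0xe8b0 is the "Copy" glyph per iconfont.json.
document.body.appendChild(makeUnicodeIcon(0xe8b0));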
+ [font-class tab: the same 16 icons, each labeled with its CSS class name, .icon-copy through .icon-log;
+ see the glyph table above.]
+
+ font-class reference
+ font-class is a variant of the Unicode mode; it mainly solves the problem that raw Unicode references
+ are hard to read and carry no semantics.
+
+ Compared with the Unicode mode:
+
+   • It is semantically explicit and easier to read; you can tell at a glance which icon is meant.
+   • Because the icon is defined by a class, swapping an icon only requires changing the Unicode
+     reference inside that class.
+
+ Usage steps:
+
+ Step 1: include the fontclass stylesheet generated for your project:
+
+ <link rel="stylesheet" href="./iconfont.css">
+
+ Step 2: pick an icon, take its class name, and use it in the page:
+
+ <span class="iconfont icon-xxx"></span>
+
+ "iconfont" is the font-family of your project; you can check it under "Edit project", and the default
+ is "iconfont".
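From TypeScript, the font-class mode is plain class assignment. A sketch with a hypothetical makeClassIcon helper; the icon-* class names themselves are the real ones defined in iconfont.css.

// Minimal sketch (hypothetical helper): render an icon via its font-class.
// Assumes iconfont.css is loaded; it defines .iconfont and the .icon-* classes.
function makeClassIcon(name: string): HTMLElement {
  const el = document.createElement("i");
  el.className = `iconfont icon-${name}`; // e.g. "iconfont icon-sent"
  return el;
}

document.body.appendChild(makeClassIcon("copy"));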
+ [Symbol tab: the same 16 icons, each labeled with its SVG symbol id, #icon-copy through #icon-log; see
+ the glyph table above.]
+
+ Symbol reference
+ This is a brand-new usage mode, arguably the future mainstream, and the one the platform currently
+ recommends. (An introductory article was linked here in the original page.) This mode effectively
+ builds a collection of SVGs. Compared with the other two modes:
+
+   • Multicolor icons are supported; you are no longer limited to a single color.
+   • With a few tricks, icons can still be styled like a font, via font-size and color.
+   • Compatibility is weaker: IE9+ and modern browsers.
+   • Browsers render SVG with only average performance, worse than png.
+
+ Usage steps:
+
+ Step 1: include the symbol script generated for your project:
+
+ <script src="./iconfont.js"></script>
+
+ Step 2: add the generic CSS (include it once):
+
+ <style>
+ .icon {
+   width: 1em;
+   height: 1em;
+   vertical-align: -0.15em;
+   fill: currentColor;
+   overflow: hidden;
+ }
+ </style>
+
+ Step 3: pick an icon and apply it in the page:
+
+ <svg class="icon" aria-hidden="true">
+   <use xlink:href="#icon-xxx"></use>
+ </svg>
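The Symbol mode can likewise be built from TypeScript with the standard namespaced DOM APIs. A sketch: the makeSymbolIcon helper is hypothetical, and it assumes iconfont.js has already injected the symbol sprite into the document.

// Minimal sketch (hypothetical helper): reference an injected SVG symbol.
// Assumes iconfont.js has inserted the <svg><symbol id="icon-..."> sprite.
const SVG_NS = "http://www.w3.org/2000/svg";
const XLINK_NS = "http://www.w3.org/1999/xlink";

function makeSymbolIcon(symbolId: string): SVGSVGElement {
  const svg = document.createElementNS(SVG_NS, "svg");
  svg.setAttribute("class", "icon");
  svg.setAttribute("aria-hidden", "true");
  const use = document.createElementNS(SVG_NS, "use");
  use.setAttributeNS(XLINK_NS, "xlink:href", `#${symbolId}`); // e.g. "#icon-log"
  svg.appendChild(use);
  return svg;
}

document.body.appendChild(makeSymbolIcon("icon-copy"));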
+ + + diff --git a/ktransformers/website/src/assets/iconfont/iconfont.css b/ktransformers/website/src/assets/iconfont/iconfont.css new file mode 100644 index 0000000..f173177 --- /dev/null +++ b/ktransformers/website/src/assets/iconfont/iconfont.css @@ -0,0 +1,80 @@ +@font-face { + font-family: "iconfont"; /* Project id 4550268 */ + src: url('iconfont.woff2?t=1717950820214') format('woff2'), + url('iconfont.woff?t=1717950820214') format('woff'), + url('iconfont.ttf?t=1717950820214') format('truetype'), + url('iconfont.svg?t=1717950820214#iconfont') format('svg'); +} + +.iconfont { + font-family: "iconfont" !important; + font-size: 16px; + font-style: normal; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; +} + +.icon-copy:before { + content: "\e8b0"; +} + +.icon-arrow-down:before { + content: "\e85e"; +} + +.icon-usage-progress:before { + content: "\e651"; +} + +.icon-gen-progress:before { + content: "\e617"; +} + +.icon-back:before { + content: "\e779"; +} + +.icon-point:before { + content: "\e608"; +} + +.icon-edit:before { + content: "\e7dd"; +} + +.icon-delete:before { + content: "\e614"; +} + +.icon-upload-1:before { + content: "\e618"; +} + +.icon-explore:before { + content: "\e621"; +} + +.icon-ellipsis:before { + content: "\e657"; +} + +.icon-sent:before { + content: "\e60c"; +} + +.icon-list-list:before { + content: "\e62d"; +} + +.icon-list-icon:before { + content: "\e639"; +} + +.icon-zhongshi:before { + content: "\e6bd"; +} + +.icon-log:before { + content: "\e826"; +} + diff --git a/ktransformers/website/src/assets/iconfont/iconfont.js b/ktransformers/website/src/assets/iconfont/iconfont.js new file mode 100644 index 0000000..c71cfef --- /dev/null +++ b/ktransformers/website/src/assets/iconfont/iconfont.js @@ -0,0 +1 @@ +window._iconfont_svg_string_4550268='',function(l){var t=(t=document.getElementsByTagName("script"))[t.length-1],c=t.getAttribute("data-injectcss"),t=t.getAttribute("data-disable-injectsvg");if(!t){var i,o,e,a,h,n=function(t,c){c.parentNode.insertBefore(t,c)};if(c&&!l.__iconfont__svg__cssinject__){l.__iconfont__svg__cssinject__=!0;try{document.write("")}catch(t){console&&console.log(t)}}i=function(){var t,c=document.createElement("div");c.innerHTML=l._iconfont_svg_string_4550268,(c=c.getElementsByTagName("svg")[0])&&(c.setAttribute("aria-hidden","true"),c.style.position="absolute",c.style.width=0,c.style.height=0,c.style.overflow="hidden",c=c,(t=document.body).firstChild?n(c,t.firstChild):t.appendChild(c))},document.addEventListener?~["complete","loaded","interactive"].indexOf(document.readyState)?setTimeout(i,0):(o=function(){document.removeEventListener("DOMContentLoaded",o,!1),i()},document.addEventListener("DOMContentLoaded",o,!1)):document.attachEvent&&(e=i,a=l.document,h=!1,d(),a.onreadystatechange=function(){"complete"==a.readyState&&(a.onreadystatechange=null,s())})}function s(){h||(h=!0,e())}function d(){try{a.documentElement.doScroll("left")}catch(t){return void setTimeout(d,50)}s()}}(window); \ No newline at end of file diff --git a/ktransformers/website/src/assets/iconfont/iconfont.json b/ktransformers/website/src/assets/iconfont/iconfont.json new file mode 100644 index 0000000..2780730 --- /dev/null +++ b/ktransformers/website/src/assets/iconfont/iconfont.json @@ -0,0 +1,121 @@ +{ + "id": "4550268", + "name": "Lexllama", + "font_family": "iconfont", + "css_prefix_text": "icon-", + "description": "Lexllamaๅผ€ๆบ้กน็›ฎไฝฟ็”จ", + "glyphs": [ + { + "icon_id": "11372665", + "name": "ๅคๅˆถ", + "font_class": "copy", + 
"unicode": "e8b0", + "unicode_decimal": 59568 + }, + { + "icon_id": "34202237", + "name": "็ฎญๅคดไธ‹", + "font_class": "arrow-down", + "unicode": "e85e", + "unicode_decimal": 59486 + }, + { + "icon_id": "7766233", + "name": "่ฟ›ๅบฆ", + "font_class": "usage-progress", + "unicode": "e651", + "unicode_decimal": 58961 + }, + { + "icon_id": "38865122", + "name": "็Žฏๅฝข่ฟ›ๅบฆๆก", + "font_class": "gen-progress", + "unicode": "e617", + "unicode_decimal": 58903 + }, + { + "icon_id": "577406", + "name": "ๅ‘ๅทฆ1", + "font_class": "back", + "unicode": "e779", + "unicode_decimal": 59257 + }, + { + "icon_id": "1920286", + "name": "็‚น", + "font_class": "point", + "unicode": "e608", + "unicode_decimal": 58888 + }, + { + "icon_id": "8866967", + "name": "็ผ–่พ‘", + "font_class": "edit", + "unicode": "e7dd", + "unicode_decimal": 59357 + }, + { + "icon_id": "10199175", + "name": "ๅˆ ้™ค", + "font_class": "delete", + "unicode": "e614", + "unicode_decimal": 58900 + }, + { + "icon_id": "1010111", + "name": "ไธŠไผ ", + "font_class": "upload-1", + "unicode": "e618", + "unicode_decimal": 58904 + }, + { + "icon_id": "351773", + "name": "ๆŽข็ดข-้€‰ไธญ", + "font_class": "explore", + "unicode": "e621", + "unicode_decimal": 58913 + }, + { + "icon_id": "564941", + "name": "ellipsis", + "font_class": "ellipsis", + "unicode": "e657", + "unicode_decimal": 58967 + }, + { + "icon_id": "1048859", + "name": "ๅ‘้€", + "font_class": "sent", + "unicode": "e60c", + "unicode_decimal": 58892 + }, + { + "icon_id": "1304951", + "name": "ๅˆ—่กจ", + "font_class": "list-list", + "unicode": "e62d", + "unicode_decimal": 58925 + }, + { + "icon_id": "8676284", + "name": "ๅˆ—่กจ", + "font_class": "list-icon", + "unicode": "e639", + "unicode_decimal": 58937 + }, + { + "icon_id": "22290034", + "name": "้‡่ฏ•", + "font_class": "zhongshi", + "unicode": "e6bd", + "unicode_decimal": 59069 + }, + { + "icon_id": "22961085", + "name": "Fork ่ฎฐๅฝ•", + "font_class": "log", + "unicode": "e826", + "unicode_decimal": 59430 + } + ] +} diff --git a/ktransformers/website/src/assets/iconfont/iconfont.svg b/ktransformers/website/src/assets/iconfont/iconfont.svg new file mode 100644 index 0000000..a2708a9 --- /dev/null +++ b/ktransformers/website/src/assets/iconfont/iconfont.svg @@ -0,0 +1,51 @@ + + + + Created by iconfont + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ktransformers/website/src/assets/iconfont/iconfont.ttf b/ktransformers/website/src/assets/iconfont/iconfont.ttf new file mode 100644 index 0000000..8202d72 Binary files /dev/null and b/ktransformers/website/src/assets/iconfont/iconfont.ttf differ diff --git a/ktransformers/website/src/assets/iconfont/iconfont.woff b/ktransformers/website/src/assets/iconfont/iconfont.woff new file mode 100644 index 0000000..51727de Binary files /dev/null and b/ktransformers/website/src/assets/iconfont/iconfont.woff differ diff --git a/ktransformers/website/src/assets/iconfont/iconfont.woff2 b/ktransformers/website/src/assets/iconfont/iconfont.woff2 new file mode 100644 index 0000000..9493057 Binary files /dev/null and b/ktransformers/website/src/assets/iconfont/iconfont.woff2 differ diff --git a/ktransformers/website/src/components/chat/index.vue b/ktransformers/website/src/components/chat/index.vue new file mode 100644 index 0000000..8a7f435 --- /dev/null +++ b/ktransformers/website/src/components/chat/index.vue @@ -0,0 +1,800 @@ + + + + + \ No newline at end of file diff --git a/ktransformers/website/src/conf/config.ts b/ktransformers/website/src/conf/config.ts 
new file mode 100644 index 0000000..ed2e9e1 --- /dev/null +++ b/ktransformers/website/src/conf/config.ts @@ -0,0 +1,11 @@ +declare global { + interface Window { + configWeb: { + apiUrl: string; + port: string; + }; + } + } + +export const baseURL = window.configWeb.apiUrl; +export const basePort = window.configWeb.port; diff --git a/ktransformers/website/src/locals/en.js b/ktransformers/website/src/locals/en.js new file mode 100644 index 0000000..afbb5ba --- /dev/null +++ b/ktransformers/website/src/locals/en.js @@ -0,0 +1,58 @@ +// en.js +export default { + home: { + explore: 'Explore', + language: 'Choose Language', + english: 'English', + chinese: 'Chinese', + today: 'Today', + previous:'Previous', + withoutAssistantTip:'The KTransformers of this record has been deleted. The user can only view historical conversation information and cannot continue the conversation!', + deleteThreadTip:'Deleting records will clear historical information~' + }, + chat:{ + inputTip:"Send a message and chat with the KTransformers ~", + }, + explore:{ + description: "Based on Lexllama, letโ€™s create your own KTransformers~", + configuring: "Configuring", + completed: "Completed", + assistantName: "Name", + assistantDescription: "Description", + assistantStatus: "Status", + createAssistant: "Create New KTransformers", + deleteAssistant: "Are you sure to delete this? After deleting the KTransformers, its KVCache will also be cleared simultaneously~", + }, + config:{ + title:'Configure your KTransformers', + fileTip:"Only support text, docx, .ppt, .pdf format.", + reConfigTip:'Reconfig KTransformers needs to delete kvcache, please choose carefully', + secletFile:'Select Files', + outOfSize:'File size exceeds 10MB, please reselect', + fileExist:'The file already exists, please reselect', + createAssistant:'Assistant created successfully, click the build button to start building KVCache', + }, + build:{ + title:'Building Logs', + step1:'Parse uploded files', + parsingFileStep1:'File upload and reception completed', + parsingFileStep2:{ + parse:"Parsing", + file:"file(s)", + total:'total', + }, + parsingFileStep3:'Prompt loaded, ready to generate KVCache', + step2:'Generate KVCache', + generateStep1:'Generate KVCache calculation plan', + generateStep2:{ + calculate:"calculating", + token:"tokens", + total:'total', + }, + generateStep3:'KVCache has been generated successfully', + durationTime:'Duration:', + remainTime:'Time left:', + buildProgress:'Building Progress', + storageUsage:'KVCache Storage Usage', + } +} diff --git a/ktransformers/website/src/locals/index.js b/ktransformers/website/src/locals/index.js new file mode 100644 index 0000000..d9942d7 --- /dev/null +++ b/ktransformers/website/src/locals/index.js @@ -0,0 +1,18 @@ +// index.js +import { createI18n } from 'vue-i18n' +import zh from './zh' +import en from './en' + +const messages = { + en, + zh, +} +const language = (navigator.language || 'en').toLocaleLowerCase() // ่ฟ™ๆ˜ฏ่Žทๅ–ๆต่งˆๅ™จ็š„่ฏญ่จ€ +const i18n = createI18n({ + legacy: false, // you must set `false`, to use Compostion API + locale: localStorage.getItem('lang') || language.split('-')[0] || 'en', // ้ฆ–ๅ…ˆไปŽ็ผ“ๅญ˜้‡Œๆ‹ฟ๏ผŒๆฒกๆœ‰็š„่ฏๅฐฑ็”จๆต่งˆๅ™จ่ฏญ่จ€๏ผŒ + fallbackLocale: 'en', // ่ฎพ็ฝฎๅค‡็”จ่ฏญ่จ€ + messages, +}) + +export default i18n \ No newline at end of file diff --git a/ktransformers/website/src/locals/zh.js b/ktransformers/website/src/locals/zh.js new file mode 100644 index 0000000..6a84875 --- /dev/null +++ b/ktransformers/website/src/locals/zh.js @@ -0,0 +1,58 @@ +// 
zh.js +export default { + home: { + explore: 'ๆŽข็ดข', + language: '้€‰ๆ‹ฉ่ฏญ่จ€', + english: '่‹ฑ่ฏญ', + chinese: 'ไธญๆ–‡', + today: 'ไปŠๅคฉ', + previous:'ๅކๅฒ', + withoutAssistantTip:'ๆœฌ่ฎฐๅฝ•็š„KTransformersๅทฒ่ขซๅˆ ้™ค๏ผŒ็”จๆˆทๅช่ƒฝๆŸฅ็œ‹ๅކๅฒๅฏน่ฏไฟกๆฏ่€Œๆ— ๆณ•็ปง็ปญๅฏน่ฏ!', + deleteThreadTip:'ๅˆ ้™ค่ฎฐๅฝ•ไผšๆธ…้™คๅކๅฒไฟกๆฏๅ“ฆ๏ฝž' + }, + chat:{ + inputTip:"ๅ‘้€ไฟกๆฏๅ’Œ KTransformers ็•…่Šๅง๏ฝž", + }, + explore:{ + description: "ๅŸบไบŽLexllama๏ผŒไธ€่ตทๆฅๅˆ›ๅปบไฝ ็š„ไธ“ๅฑžKTransformersๅง~", + configuring: "้…็ฝฎไธญ", + completed: "ๅฎŒๆˆ", + assistantName: "ๅ็งฐ", + assistantDescription: "ๆ่ฟฐ", + assistantStatus: "Status", + createAssistant: "ๅˆ›ๅปบๆ–ฐ็š„KTransformers", + deleteAssistant: "ๆ˜ฏๅฆ็กฎ่ฎคๅˆ ้™คKTransformers๏ผŒๅˆ ้™คKTransformersไน‹ๅŽๅ…ถKVCacheไนŸไผš่ขซๅŒๆญฅๆธ…็†ๆމๅ“ฆ~", + }, + config:{ + title:'้…็ฝฎไฝ ็š„KTransformers', + fileTip:"ไป…ๆ”ฏๆŒไธŠไผ ๆ–‡ไปถๆ ผๅผไธบ .text, docx, .ppt, .pdf format.", + secletFile:'้€‰ๆ‹ฉๆ–‡ไปถ', + outOfSize:'ๆ–‡ไปถๅคงๅฐ่ถ…ๅ‡บ10MB๏ผŒ่ฏท้‡ๆ–ฐ้€‰ๆ‹ฉ', + fileExist:'ๆ–‡ไปถๅทฒๅญ˜ๅœจ๏ผŒ่ฏท้‡ๆ–ฐ้€‰ๆ‹ฉ', + createAssistant:'KTransformersๅˆ›ๅปบๆˆๅŠŸ๏ผŒ็‚นๅ‡ปbuildๆŒ‰้’ฎๅผ€ๅง‹ๆž„ๅปบKVCache', + }, + build:{ + title:'ๆž„ๅปบๆ—ฅๅฟ—', + step1:'่งฃๆžไธŠไผ ๆ–‡ไปถ', + parsingFileStep1:'ๆ–‡ไปถไธŠไผ ๆŽฅๆ”ถๅฎŒๆˆ', + parsingFileStep2:{ + parse:"ๆญฃๅœจ่งฃๆž็ฌฌ", + file:"ๆ–‡ไปถ", + total:'ๅ…ฑ', + }, + parsingFileStep3:'Prompt่ฃ…่ฝฝๅฎŒๆฏ•๏ผŒๅ‡†ๅค‡็”ŸๆˆKVCache', + step2:'็”Ÿๆˆ KVCache', + generateStep1:'็”ŸๆˆKVCache่ฎก็ฎ—่ฎกๅˆ’', + generateStep2:{ + calculate:"ๆญฃๅœจ่ฎก็ฎ—", + token:"tokens", + total:'ๅ…ฑ', + }, + generateStep3:'KVCacheๅทฒ็”ŸๆˆๅฎŒๆˆ', + durationTime:'ๆŒ็ปญๆ—ถ้—ด๏ผš', + remainTime:'ๅ‰ฉไฝ™ๆ—ถ้—ด๏ผš', + buildProgress:'ๆž„ๅปบ่ฟ›ๅบฆ', + storageUsage:'ๅญ˜ๅ‚จไฝฟ็”จ๏ผš', + + } +} diff --git a/ktransformers/website/src/main.ts b/ktransformers/website/src/main.ts new file mode 100644 index 0000000..fcf655c --- /dev/null +++ b/ktransformers/website/src/main.ts @@ -0,0 +1,18 @@ +import { createApp } from 'vue' +import App from './App.vue' +import router from './router' +import store from './store' +import ElementPlus from 'element-plus' +import 'element-plus/dist/index.css' +import VueApexCharts from "vue3-apexcharts" +import i18n from '@/locals' + +const app = createApp(App) + +app.use(ElementPlus) + +app.use(i18n) +app.use(VueApexCharts) +app.use(store) +app.use(router) +app.mount('#app') diff --git a/ktransformers/website/src/router/index.ts b/ktransformers/website/src/router/index.ts new file mode 100644 index 0000000..76a2d61 --- /dev/null +++ b/ktransformers/website/src/router/index.ts @@ -0,0 +1,24 @@ +import { createRouter, createWebHashHistory, RouteRecordRaw, createWebHistory } from 'vue-router' +import HomeView from '@/views/home.vue' + +const routes: Array = [ + { + path: '/', + name: 'home', + component: HomeView, + redirect: '/chat', + children: [{ + path: '/chat', + name: '', + component: () => import(/* webpackChunkName: "about" */ '../components/chat/index.vue') + },] + }, + +] + +const router = createRouter({ + history: createWebHashHistory(), + routes +}) + +export default router diff --git a/ktransformers/website/src/shims-vue.d.ts b/ktransformers/website/src/shims-vue.d.ts new file mode 100644 index 0000000..728a02f --- /dev/null +++ b/ktransformers/website/src/shims-vue.d.ts @@ -0,0 +1,10 @@ +/* eslint-disable */ +declare module '*.vue' { + import type { DefineComponent } from 'vue' + const component: DefineComponent<{}, {}, any> + export default component + +} + +declare module '@/locals' +declare module 
'pdfobject'; diff --git a/ktransformers/website/src/store/index.ts b/ktransformers/website/src/store/index.ts new file mode 100644 index 0000000..7f5b89c --- /dev/null +++ b/ktransformers/website/src/store/index.ts @@ -0,0 +1,14 @@ +import { createStore } from 'vuex' + +export default createStore({ + state: { + }, + getters: { + }, + mutations: { + }, + actions: { + }, + modules: { + } +}) diff --git a/ktransformers/website/src/utils/copy.ts b/ktransformers/website/src/utils/copy.ts new file mode 100644 index 0000000..c36e6cc --- /dev/null +++ b/ktransformers/website/src/utils/copy.ts @@ -0,0 +1,111 @@ +import { ElMessage } from "element-plus"; +const copy = (value: string) => { + //Try using the navigator.clipboard.writeText method + if (navigator.clipboard && window.isSecureContext) { + navigator.clipboard.writeText(value) + .then(() => { + //Using ElMessage to Display Success Messages in Windows Systems + if (navigator.appVersion.includes("Win")) { + ElMessage({ + message: "ๅ†…ๅฎนๅคๅˆถๆˆๅŠŸ!", + type: "success", + plain: true, + }); + } else { + //Using custom DOM elements to display success messages in macOS system + showCopySuccessMessage(); + } + }) + .catch(() => { + //Using ElMessage to Display Failure Messages in Windows Systems + if (navigator.appVersion.includes("Win")) { + ElMessage({ + message: "ๅ†…ๅฎนๅคๅˆถๅคฑ่ดฅ!", + type: "error", + plain: true, + }); + } else { + //Using custom DOM elements to display failure messages in macOS system + showCopyErrorMessage(); + } + }); + } else { + const textarea = document.createElement("textarea"); + textarea.value = value; + document.body.appendChild(textarea); + textarea.select(); + try { + const successful = document.execCommand('copy'); + if (successful) { + if (navigator.appVersion.includes("Win")) { + ElMessage({ + message: "ๅ†…ๅฎนๅคๅˆถๆˆๅŠŸ!", + type: "success", + plain: true, + }); + } else { + showCopySuccessMessage(); + } + } else { + if (navigator.appVersion.includes("Win")) { + ElMessage({ + message: "ๅ†…ๅฎนๅคๅˆถๅคฑ่ดฅ!", + type: "error", + plain: true, + }); + } else { + showCopyErrorMessage(); + } + } + } catch (err) { + if (navigator.appVersion.includes("Win")) { + ElMessage({ + message: "ๅ†…ๅฎนๅคๅˆถๅคฑ่ดฅ!", + type: "error", + plain: true, + }); + } else { + showCopyErrorMessage(); + } + } + document.body.removeChild(textarea); + } +}; + +function showCopySuccessMessage() { + const messageElement = document.createElement('div'); + messageElement.textContent = 'ๅ†…ๅฎนๅคๅˆถๆˆๅŠŸ!'; + messageElement.style.position = 'fixed'; + messageElement.style.bottom = '10px'; + messageElement.style.left = '50%'; + messageElement.style.transform = 'translateX(-50%)'; + messageElement.style.padding = '10px'; + messageElement.style.backgroundColor = '#4CAF50'; + messageElement.style.color = 'white'; + messageElement.style.borderRadius = '15px'; + messageElement.style.zIndex = '1000'; + document.body.appendChild(messageElement); + setTimeout(() => { + document.body.removeChild(messageElement); + }, 3000); +} + +function showCopyErrorMessage() { + const messageElement = document.createElement('div'); + messageElement.textContent = 'ๅ†…ๅฎนๅคๅˆถๅคฑ่ดฅ!'; + messageElement.style.position = 'fixed'; + messageElement.style.bottom = '10px'; + messageElement.style.left = '50%'; + messageElement.style.transform = 'translateX(-50%)'; + messageElement.style.padding = '10px'; + messageElement.style.backgroundColor = '#F44336'; + messageElement.style.color = 'white'; + messageElement.style.borderRadius = '5px'; + messageElement.style.zIndex = 
'1000'; + document.body.appendChild(messageElement); + setTimeout(() => { + document.body.removeChild(messageElement); + }, 3000); +} + +export default copy; \ No newline at end of file diff --git a/ktransformers/website/src/utils/types.ts b/ktransformers/website/src/utils/types.ts new file mode 100644 index 0000000..f9517d6 --- /dev/null +++ b/ktransformers/website/src/utils/types.ts @@ -0,0 +1,125 @@ +export interface IAssistant { + id: string; + object: string; + created_at: number; + name?: string; + description?: string; + model: string; + instructions?: string; + tools: any[]; + tool_resources?: object; + metadata?:{[key:string]:any} + top_p?: number; + temperature?: number; + response_format: string | object; +} + +export interface IAssistantWithStatus { + build_status:{status:string} + id: string; + object: string; + created_at: number; + name?: string; + description?: string; + model: string; + instructions?: string; + tools: any[]; + tool_resources?: object; + metadata?:{[key:string]:any} + top_p?: number; + temperature?: number; + response_format: string | object; +} + +export interface IMessage { + id: string; + object: string; + created_at: number; + thread_id: string; + status: string; + incomplete_details?: object; + completed_at?: number; + incomplete_at?: number; + role: string; + content: any[]; + assistant_id?: string; + run_id?: string; + attachments?: any[]; + metadata:{[key:string]:any} +} + +export interface IThread { + id: string; + object: string; + created_at: number; + tool_resources?: object; + metadata?:{[key:string]:any} +} + +export interface IRun { + id: string; + object: string; + created_at: number; + thread_id: string, + assistant_id: string, + status: string, + required_action?: object, + last_error?: object, + expires_at?: number, + started_at?: number, + cancelled_at?: number, + failed_at?: number, + completed_at?: number, + incomplete_details?: object, + model: string, + instructions: string, + tools: any[], + metadata: Map, + usage?: object, + temperature?: number, + top_p?: number, + max_prompt_tokens?: number, + max_completion_tokens?: number, + truncation_strategy: object, + tool_choice: string | object, + response_format: string | object, +} + +export interface IFile { + id: string, + bytes: number, + created_at: number, + filename: string, + object: string, + purpose: string, +} + +export interface IMessageData { + role: string; + content: any[]; + created_at?: number; + assistant_id?: string, +} + +export interface IThreadAndMessageAndAssistant { + + thread: IThread; + first_message: IMessage; + assistant: IAssistantWithStatus +} +export interface IDeleteResult { + id: string; + object: string; + deleted: boolean; +} +export interface IBuildData { + parsed_file_count:number; + total_file_count:number; + prefilling_current:number; + prefilling_total:number; + build_completed_time:number; + build_started_time:number; + storage_total:number; + storage_usage:number; + status:string +} \ No newline at end of file diff --git a/ktransformers/website/src/views/home.vue b/ktransformers/website/src/views/home.vue new file mode 100644 index 0000000..7d91150 --- /dev/null +++ b/ktransformers/website/src/views/home.vue @@ -0,0 +1,718 @@ + + + + + + diff --git a/ktransformers/website/tests/unit/example.spec.ts b/ktransformers/website/tests/unit/example.spec.ts new file mode 100644 index 0000000..4b21ca7 --- /dev/null +++ b/ktransformers/website/tests/unit/example.spec.ts @@ -0,0 +1,12 @@ +import { shallowMount } from '@vue/test-utils' +import HelloWorld from 
'@/components/HelloWorld.vue' + +describe('HelloWorld.vue', () => { + it('renders props.msg when passed', () => { + const msg = 'new message' + const wrapper = shallowMount(HelloWorld, { + props: { msg } + }) + expect(wrapper.text()).toMatch(msg) + }) +}) diff --git a/ktransformers/website/tsconfig.json b/ktransformers/website/tsconfig.json new file mode 100644 index 0000000..a4dda20 --- /dev/null +++ b/ktransformers/website/tsconfig.json @@ -0,0 +1,45 @@ +{ + "compilerOptions": { + "target": "es5", + "module": "esnext", + "strict": true, + "jsx": "preserve", + "importHelpers": true, + "moduleResolution": "node", + "skipLibCheck": true, + "esModuleInterop": true, + "allowSyntheticDefaultImports": true, + "forceConsistentCasingInFileNames": true, + "useDefineForClassFields": true, + "sourceMap": true, + "allowJs": true, + "baseUrl": ".", + "types": [ + "webpack-env", + "jest" + ], + "paths": { + "@/*": [ + "src/*" + ] + }, + "lib": [ + "esnext", + "dom", + "dom.iterable", + "scripthost" + ] + }, + "include": [ + "src/**/*.ts", + "src/**/*.tsx", + "src/**/*.vue", + "tests/**/*.ts", + "tests/**/*.tsx", + "config.d.ts" + ], + + "exclude": [ + "node_modules" + ] +} \ No newline at end of file diff --git a/ktransformers/website/vue.config.js b/ktransformers/website/vue.config.js new file mode 100644 index 0000000..e51b1eb --- /dev/null +++ b/ktransformers/website/vue.config.js @@ -0,0 +1,40 @@ + +module.exports = { + // ้…็ฝฎ webpack-dev-server ่กŒไธบใ€‚ + devServer: { + open: false, // ็ผ–่ฏ‘ๅŽ้ป˜่ฎคๆ‰“ๅผ€ๆต่งˆๅ™จ + host: '0.0.0.0', // ๅŸŸๅ + port: 8082, // ็ซฏๅฃ + https: false, // ๆ˜ฏๅฆhttps + proxy: { + '/api': { + target: 'http://localhost:9016/v1', // ไฝ ็š„ๅŽ็ซฏๆœๅŠกๅ™จๅœฐๅ€ + changeOrigin: true, // ๆ˜ฏๅฆๅ…่ฎธ่ทจๅŸŸ + pathRewrite: { + '/api': '' // ๅฐ† '/api' ๅ‰็ผ€ๆ›ฟๆขไธบ็ฉบ๏ผŒๅฆ‚ๆžœไฝ ็š„ๅŽ็ซฏไธ้œ€่ฆ่ฟ™ไธชๅ‰็ผ€ + } + } + } +}, +publicPath: '/web/', // ๅŸบๆœฌ่ทฏๅพ„ +outputDir: 'dist', // ๆž„ๅปบๆ—ถ็š„่พ“ๅ‡บ็›ฎๅฝ• +assetsDir: 'static', // ๆ”พ็ฝฎ้™ๆ€่ต„ๆบ็š„็›ฎๅฝ• +indexPath: 'index.html', // html ็š„่พ“ๅ‡บ่ทฏๅพ„ +filenameHashing: true, // ๆ–‡ไปถๅๅ“ˆๅธŒๅ€ผ +lintOnSave: false, // ๆ˜ฏๅฆๅœจไฟๅญ˜็š„ๆ—ถๅ€™ไฝฟ็”จ `eslint-loader` ่ฟ›่กŒๆฃ€ๆŸฅใ€‚ + +// ็ป„ไปถๆ˜ฏๅฆ‚ไฝ•่ขซๆธฒๆŸ“ๅˆฐ้กต้ขไธญ็š„๏ผŸ ๏ผˆast๏ผšๆŠฝ่ฑก่ฏญๆณ•ๆ ‘๏ผ›vDom๏ผš่™šๆ‹ŸDOM๏ผ‰ +// template ---> ast ---> render ---> vDom ---> ็œŸๅฎž็š„Dom ---> ้กต้ข +// runtime-only๏ผšๅฐ†templateๅœจๆ‰“ๅŒ…็š„ๆ—ถๅ€™๏ผŒๅฐฑๅทฒ็ป็ผ–่ฏ‘ไธบrenderๅ‡ฝๆ•ฐ +// runtime-compiler๏ผšๅœจ่ฟ่กŒ็š„ๆ—ถๅ€™ๆ‰ๅŽป็ผ–่ฏ‘template +runtimeCompiler: false, + +transpileDependencies: [], // babel-loader ้ป˜่ฎคไผš่ทณ่ฟ‡ node_modules ไพ่ต–ใ€‚ +productionSourceMap: false, // ๆ˜ฏๅฆไธบ็”Ÿไบง็Žฏๅขƒๆž„ๅปบ็”Ÿๆˆ source map + +//่ฐƒๆ•ดๅ†…้ƒจ็š„ webpack ้…็ฝฎ +configureWebpack: () => {}, + +chainWebpack: () => {}, + +} \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..3378ef0 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,8 @@ +[build-system] +requires = [ + "setuptools", + "torch == 2.3.1", + "ninja", + "packaging" + ] +build-backend = "setuptools.build_meta" diff --git a/requirements-local_chat.txt b/requirements-local_chat.txt new file mode 100644 index 0000000..917cc03 --- /dev/null +++ b/requirements-local_chat.txt @@ -0,0 +1,5 @@ +fire +transformers +numpy +torch>=2.3.0 +packaging diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..5219f7b --- /dev/null +++ b/setup.py @@ -0,0 +1,249 @@ +#!/usr/bin/env python +# coding=utf-8 +''' +Description : +Author : chenxl +Date : 2024-07-12 
07:25:42 +Version : 1.0.0 +LastEditors : chenxl +LastEditTime : 2024-07-27 04:31:03 +''' +import os +import shutil +import sys +import re +import ast +import subprocess +import platform +import io +from pathlib import Path +from packaging.version import parse +import torch.version +from wheel.bdist_wheel import bdist_wheel as _bdist_wheel +from setuptools import setup, Extension +import torch +from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME + +ROOT_DIR = os.path.dirname(__file__) +class VersionInfo: + THIS_DIR = os.path.dirname(os.path.abspath(__file__)) + PACKAGE_NAME = "ktransformers" + def get_cuda_bare_metal_version(self, cuda_dir): + raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True) + output = raw_output.split() + release_idx = output.index("release") + 1 + bare_metal_version = parse(output[release_idx].split(",")[0]) + cuda_version = f"{bare_metal_version.major}{bare_metal_version.minor}" + return cuda_version + + def get_cuda_version_of_torch(self,): + torch_cuda_version = parse(torch.version.cuda) + cuda_version = f"{torch_cuda_version.major}{torch_cuda_version.minor}" + return cuda_version + + def get_platform(self,): + """ + Returns the platform name as used in wheel filenames. + """ + if sys.platform.startswith("linux"): + return f'linux_{platform.uname().machine}' + else: + raise ValueError("Unsupported platform: {}".format(sys.platform)) + + def get_cpu_instruct(self,): + if sys.platform.startswith("linux"): + with open('/proc/cpuinfo', 'r') as cpu_f: + cpuinfo = cpu_f.read() + + flags_line = [line for line in cpuinfo.split('\n') if line.startswith('flags')][0] + flags = flags_line.split(':')[1].strip().split(' ') + for flag in flags: + if 'avx512' in flag: + return 'avx512' + for flag in flags: + if 'avx2' in flag: + return 'avx2' + raise ValueError("Unsupported CPU instructions: {}".format(flags_line)) + # explicitly reject non-Linux platforms instead of silently returning None + raise ValueError("Unsupported platform: {}".format(sys.platform)) + + def get_torch_version(self,): + torch_version_raw = parse(torch.__version__) + torch_version = f"{torch_version_raw.major}{torch_version_raw.minor}" + return torch_version + + def get_package_version(self,): + version_file = os.path.join(Path(VersionInfo.THIS_DIR), VersionInfo.PACKAGE_NAME, "__init__.py") + with open(version_file, "r", encoding="utf-8") as f: + version_match = re.search(r"^__version__\s*=\s*(.*)$", f.read(), re.MULTILINE) + public_version = ast.literal_eval(version_match.group(1)) + package_version = f"{str(public_version)}+cu{self.get_cuda_bare_metal_version(CUDA_HOME)}torch{self.get_torch_version()}{self.get_cpu_instruct()}" + return package_version + + +class BuildWheelsCommand(_bdist_wheel): + def get_wheel_name(self,): + version_info = VersionInfo() + python_version = f"cp{sys.version_info.major}{sys.version_info.minor}" + wheel_filename = f"{VersionInfo.PACKAGE_NAME}-{version_info.get_package_version()}-{python_version}-{python_version}-{version_info.get_platform()}.whl" + return wheel_filename + + + def run(self): + super().run() + impl_tag, abi_tag, plat_tag = self.get_tag() + archive_basename = f"{self.wheel_dist_name}-{impl_tag}-{abi_tag}-{plat_tag}" + wheel_path = os.path.join(self.dist_dir, archive_basename + ".whl") + wheel_name_with_platform = os.path.join(self.dist_dir, self.get_wheel_name()) + os.rename(wheel_path, wheel_name_with_platform) + + +# Convert distutils Windows platform specifiers to CMake -A arguments +PLAT_TO_CMAKE = { + "win32": "Win32", + "win-amd64": "x64", + "win-arm32": "ARM", + "win-arm64": "ARM64", +} + +class CopyExtension(Extension): + def
__init__(self, name: str, sourcedir: str = "", copy_file_source="") -> None: + super().__init__(name, sources=[]) + self.sourcedir = os.fspath(Path(sourcedir).resolve()) + self.source_file = copy_file_source +class CMakeExtension(Extension): + def __init__(self, name: str, sourcedir: str = "") -> None: + super().__init__(name, sources=[]) + self.sourcedir = os.fspath(Path(sourcedir).resolve() / "ktransformers/ktransformers_ext") +class CMakeBuild(BuildExtension): + def build_extension(self, ext) -> None: + if isinstance(ext, CopyExtension): + ext_fullpath = Path.cwd() / self.get_ext_fullpath(ext.name) + extdir = ext_fullpath.parent.resolve() + shutil.copy(ext.source_file, extdir) + return + if not isinstance(ext, CMakeExtension): + super().build_extension(ext) + return + ext_fullpath = Path.cwd() / self.get_ext_fullpath(ext.name) + extdir = ext_fullpath.parent.resolve() + + # Using this requires trailing slash for auto-detection & inclusion of + # auxiliary "native" libs + + debug = int(os.environ.get("DEBUG", 0)) if self.debug is None else self.debug + cfg = "Debug" if debug else "Release" + + # CMake lets you override the generator - we need to check this. + # Can be set with Conda-Build, for example. + cmake_generator = os.environ.get("CMAKE_GENERATOR", "") + + # Set Python_EXECUTABLE instead if you use PYBIND11_FINDPYTHON + # EXAMPLE_VERSION_INFO shows you how to pass a value into the C++ code + # from Python. + cmake_args = [ + f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extdir}{os.sep}", + f"-DPYTHON_EXECUTABLE={sys.executable}", + f"-DCMAKE_BUILD_TYPE={cfg}", # not used on MSVC, but no harm + ] + build_args = [] + if "CMAKE_ARGS" in os.environ: + cmake_args += [item for item in os.environ["CMAKE_ARGS"].split(" ") if item] + + # In this example, we pass in the version to C++. You might not need to. 
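+ # (Sketch of the intent only: a CMakeLists.txt could surface this cache
+ # variable to C++ via add_compile_definitions or configure_file; whether
+ # this project's CMake actually consumes EXAMPLE_VERSION_INFO is not
+ # shown here.)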
+ cmake_args += [f"-DEXAMPLE_VERSION_INFO={self.distribution.get_version()}"] + if self.compiler.compiler_type != "msvc": + if not cmake_generator or cmake_generator == "Ninja": + try: + import ninja + + ninja_executable_path = Path(ninja.BIN_DIR) / "ninja" + cmake_args += [ + "-GNinja", + f"-DCMAKE_MAKE_PROGRAM:FILEPATH={ninja_executable_path}", + ] + except ImportError: + pass + + else: + # Single config generators are handled "normally" + single_config = any(x in cmake_generator for x in {"NMake", "Ninja"}) + + # CMake allows an arch-in-generator style for backward compatibility + contains_arch = any(x in cmake_generator for x in {"ARM", "Win64"}) + if not single_config and not contains_arch: + cmake_args += ["-A", PLAT_TO_CMAKE[self.plat_name]] + + # Multi-config generators have a different way to specify configs + if not single_config: + cmake_args += [ + f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{cfg.upper()}={extdir}" + ] + build_args += ["--config", cfg] + + if sys.platform.startswith("darwin"): + # Cross-compile support for macOS - respect ARCHFLAGS if set + archs = re.findall(r"-arch (\S+)", os.environ.get("ARCHFLAGS", "")) + if archs: + cmake_args += ["-DCMAKE_OSX_ARCHITECTURES={}".format(";".join(archs))] + + if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ: + if hasattr(self, "parallel") and self.parallel: + build_args += [f"-j{self.parallel}"] + + build_temp = Path(ext.sourcedir) / "build" + if not build_temp.exists(): + build_temp.mkdir(parents=True) + subprocess.run( + ["cmake", ext.sourcedir, *cmake_args], cwd=build_temp, check=True + ) + subprocess.run( + ["cmake", "--build", ".", *build_args], cwd=build_temp, check=True + ) + +def read_readme() -> str: + p = os.path.join(ROOT_DIR, "README.md") + if os.path.isfile(p): + return io.open(p, "r", encoding="utf-8").read() + else: + return "" + +setup( + name="ktransformers", + version=VersionInfo().get_package_version(), + author="KVCache.ai", + license="Apache 2.0", + description = "KTransformers, pronounced as Quick Transformers, is designed to enhance your Transformers experience with advanced kernel optimizations and placement/parallelism strategies.", + long_description=read_readme(), + long_description_content_type="text/markdown", + cmdclass={"build_ext": CMakeBuild}, + install_requires = [ + "torch >= 2.3.0", + "transformers == 4.43.2", + "fastapi >= 0.111.0", + "langchain >= 0.2.0", + "blessed >= 1.20.0", + "accelerate >= 0.31.0", + "sentencepiece >= 0.1.97", + "setuptools", + "ninja", + "wheel", + "colorlog", + "build", + "packaging", + "fire" + ], + python_requires=">=3.10", + entry_points={ + "console_scripts": [ + "ktransformers=ktransformers.server.main:main", + ], + }, + packages=["ktransformers"], + include_package_data=True, + ext_modules=[ + CUDAExtension('KTransformersOps', [ + 'ktransformers/ktransformers_ext/cuda/custom_gguf/dequant.cu', + 'ktransformers/ktransformers_ext/cuda/binding.cpp', + 'ktransformers/ktransformers_ext/cuda/gptq_marlin/gptq_marlin.cu', + ]), + CMakeExtension("cpuinfer_ext")] +) \ No newline at end of file diff --git a/third_party/llama.cpp b/third_party/llama.cpp new file mode 160000 index 0000000..a94e6ff --- /dev/null +++ b/third_party/llama.cpp @@ -0,0 +1 @@ +Subproject commit a94e6ff8774b7c9f950d9545baf0ce35e8d1ed2f diff --git a/third_party/llamafile/README.md b/third_party/llamafile/README.md new file mode 100644 index 0000000..1308a12 --- /dev/null +++ b/third_party/llamafile/README.md @@ -0,0 +1 @@ +The code in this folder is copied from 
[Mozilla-Ocho/llamafile](https://github.com/Mozilla-Ocho/llamafile). Special thanks to the Mozilla-Ocho team. diff --git a/third_party/llamafile/bench.h b/third_party/llamafile/bench.h new file mode 100644 index 0000000..6669bd2 --- /dev/null +++ b/third_party/llamafile/bench.h @@ -0,0 +1,25 @@ +// Adapted from +// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/bench.h +// Copyright 2024 Mozilla Foundation. +// Copyright(c) 2024 by KVCache.AI, All Rights Reserved. + +// -*- mode:c++;indent-tabs-mode:nil;c-basic-offset:4;coding:utf-8 -*- +// vi: set et ft=cpp ts=4 sts=4 sw=4 fenc=utf-8 :vi +#pragma once + +#include <stdio.h> + +#include "micros.h" + +#define BENCH(x) \ + do { \ + x; \ + __asm__ volatile("" ::: "memory"); \ + long long start = micros(); \ + for (int i = 0; i < ITERATIONS; ++i) { \ + __asm__ volatile("" ::: "memory"); \ + x; \ + __asm__ volatile("" ::: "memory"); \ + } \ + printf("%9lld us %s\n", (micros() - start + ITERATIONS - 1) / ITERATIONS, #x); \ + } while (0) diff --git a/third_party/llamafile/flags.cpp b/third_party/llamafile/flags.cpp new file mode 100644 index 0000000..ae3c112 --- /dev/null +++ b/third_party/llamafile/flags.cpp @@ -0,0 +1,8 @@ +// Adapted from +// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/flags.cpp +// Copyright 2024 Mozilla Foundation. +// Copyright(c) 2024 by KVCache.AI, All Rights Reserved. + +#include "flags.h" + +bool FLAG_precise = false; diff --git a/third_party/llamafile/flags.h b/third_party/llamafile/flags.h new file mode 100644 index 0000000..b3744e1 --- /dev/null +++ b/third_party/llamafile/flags.h @@ -0,0 +1,8 @@ +// Adapted from +// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/flags.cpp +// Copyright 2024 Mozilla Foundation. +// Copyright(c) 2024 by KVCache.AI, All Rights Reserved. + +#pragma once + +extern bool FLAG_precise; diff --git a/third_party/llamafile/iqk_mul_mat.inc b/third_party/llamafile/iqk_mul_mat.inc new file mode 100644 index 0000000..150a8f9 --- /dev/null +++ b/third_party/llamafile/iqk_mul_mat.inc @@ -0,0 +1,3050 @@ +// Adapted from +// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/iqk_mul_mat.inc +// Copyright 2024 Iwan Kawrakow. +// Copyright(c) 2024 by KVCache.AI, All Rights Reserved. + +// -*- mode:c++;indent-tabs-mode:nil;c-basic-offset:4;coding:utf-8 -*- +// vi: set et ft=cpp fenc=utf-8 :vi +// +// Copyright 2024 Iwan Kawrakow +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include <cstring> +#include <type_traits> +#if defined __x86_64__ || defined __aarch64__ + +#include "llama.cpp/ggml-impl.h" +#include "llama.cpp/ggml-quants.h" +#include "sgemm.h" + +// For i-quants, I had to explicitly specify which +// functions to inline / not inline (at least for some +// of the functions), else performance would be significantly +// lower. This is worrisome as things can change with, +// e.g., a different compiler version or running on a different +// CPU.
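+// The IQK_NOINLINE / IQK_ALWAYS_INLINE macros defined just below pin those
+// inlining decisions explicitly. A minimal usage sketch (both function names
+// are hypothetical, shown only to illustrate the pattern):
+//
+//     IQK_ALWAYS_INLINE int unpack_nibble(int x) { return x & 0xf; } // tiny hot helper
+//     IQK_NOINLINE void dequant_block(const void * vx, int n);       // big kernel, kept out-of-line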
+#ifdef _MSC_VER +#define IQK_NOINLINE __declspec(noinline) +#define IQK_ALWAYS_INLINE inline +#else +#define IQK_NOINLINE __attribute__((__noinline__)) +#define IQK_ALWAYS_INLINE __attribute__((always_inline)) +#endif + +#define GGML_COMMON_IMPL_C +#include "llama.cpp/ggml-common.h" + +// clang-format off + +// This matrix - vector and matrix - matrix multiplication implementation +// for legacy quants, k-quants and i-quants makes prompt processing 150-200% +// (legacy and k-quants) or 250-400% (i-quants) faster. +// compared to mainline llama.cpp (and llamafile). +// It provides implementations for ARM_NEON (all quants) and AVX2 +// (all quants except sub-4 bit i-quants). +// +// Main idea is that unpacking the quants and the block scales to +// be ready for dot products with the corresponding Q8_Y quants +// takes time (here 'Y' stands for K, 0, or 1, depending on quantization type). +// Hence, if we are performing a QX x Q8_Y matrix matrix +// multiplication (as needed for prompt processing), we can get +// a significant speedup by reusing the unpacked QX quants and scales +// for multiplication with several Q8_K columns. We also achieve fewer +// loads from memory, which is the main purpose of tiling in general +// purpose matrix multiplication packages. + +#include +#include + +#endif + +namespace { + +typedef struct { + int32_t i1; + int32_t i2; +} mmid_row_mapping; + +struct DataInfo { + float * s; + const char * cy; + size_t bs; + size_t by; + int cur_y = 0; + int ne11; + const mmid_row_mapping * row_mapping = nullptr; + size_t bs2 = 0; + + inline const char * src1_row(int iy) const { + if (!row_mapping) return cy + (cur_y + iy)*by; + int i11 = row_mapping[cur_y + iy].i1 % ne11; + int i12 = row_mapping[cur_y + iy].i2; + return cy + (i11 + i12*ne11)*by; + } + + inline void store(int ix, int iy, float result) const { + *(dst_row(iy) + ix) = result; + //dst_row(iy)[ix] = result; + } + inline float * dst_row(int iy) const { + if (!row_mapping) return s + (cur_y + iy)*bs; + int i12 = row_mapping[cur_y + iy].i2; + int i1 = row_mapping[cur_y + iy].i1; + int i2 = i12; + return s + i1*bs + i2*bs2; + } +}; + +typedef void (*mul_mat_t)(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x); + +struct MulMat { + std::array funcs = {}; + //inline void mul_mat_NxM(int n, const void * vx, size_t bx, DataInfo& info, int nrc_x, int nrc_y) { + IQK_NOINLINE void mul_mat_NxM(int n, const void * vx, size_t bx, DataInfo& info, int nrc_x, int nrc_y) { + constexpr int k_x_step = 64; // This works best on my Ryzen-7950X and M2 Max CPUs (but differences to other tile size are small) + int n_step = (nrc_y - info.cur_y)/funcs.size(); + if (n_step > 0) { + for (int ix = 0; ix < nrc_x; ix += k_x_step) { + auto this_info = info; + this_info.s += ix; + int this_nrc_x = ix + k_x_step <= nrc_x ? 
k_x_step : nrc_x - ix; + for (int iy = 0; iy < n_step; ++iy) { + funcs.back()(n, (const void *)((const char *)vx + ix*bx), bx, this_info, this_nrc_x); + this_info.cur_y += funcs.size(); + } + } + info.cur_y += funcs.size() * n_step; + } + int n_left = nrc_y - info.cur_y; + if (n_left > 0) { + funcs[n_left-1](n, vx, bx, info, nrc_x); + } + } + static IQK_NOINLINE bool set_mul_mat(int typeA, int ne00, MulMat& mm, int& row_size_q8, int Ny); +private: + template static IQK_NOINLINE void set_functions(MulMat& m); +}; + +inline void make_q4_scales(const uint8_t * scales8, uint32_t * aux32) { + const uint16_t * scales = (const uint16_t *)scales8; + const uint32_t a0 = scales[0] | (scales[1] << 16); + const uint32_t a1 = scales[2] | (scales[3] << 16); + const uint32_t a2 = scales[4] | (scales[5] << 16); + aux32[3] = ((a2 >> 4) & 0x0f0f0f0f) | ((a1 >> 2) & 0x30303030); + aux32[1] = ((a2 >> 0) & 0x0f0f0f0f) | ((a0 >> 2) & 0x30303030); + aux32[2] = a1 & 0x3f3f3f3f; + aux32[0] = a0 & 0x3f3f3f3f; +} + +const uint64_t keven_signs[128] = { + 0x0101010101010101, 0xff010101010101ff, 0xff0101010101ff01, 0x010101010101ffff, + 0xff01010101ff0101, 0x0101010101ff01ff, 0x0101010101ffff01, 0xff01010101ffffff, + 0xff010101ff010101, 0x01010101ff0101ff, 0x01010101ff01ff01, 0xff010101ff01ffff, + 0x01010101ffff0101, 0xff010101ffff01ff, 0xff010101ffffff01, 0x01010101ffffffff, + 0xff0101ff01010101, 0x010101ff010101ff, 0x010101ff0101ff01, 0xff0101ff0101ffff, + 0x010101ff01ff0101, 0xff0101ff01ff01ff, 0xff0101ff01ffff01, 0x010101ff01ffffff, + 0x010101ffff010101, 0xff0101ffff0101ff, 0xff0101ffff01ff01, 0x010101ffff01ffff, + 0xff0101ffffff0101, 0x010101ffffff01ff, 0x010101ffffffff01, 0xff0101ffffffffff, + 0xff01ff0101010101, 0x0101ff01010101ff, 0x0101ff010101ff01, 0xff01ff010101ffff, + 0x0101ff0101ff0101, 0xff01ff0101ff01ff, 0xff01ff0101ffff01, 0x0101ff0101ffffff, + 0x0101ff01ff010101, 0xff01ff01ff0101ff, 0xff01ff01ff01ff01, 0x0101ff01ff01ffff, + 0xff01ff01ffff0101, 0x0101ff01ffff01ff, 0x0101ff01ffffff01, 0xff01ff01ffffffff, + 0x0101ffff01010101, 0xff01ffff010101ff, 0xff01ffff0101ff01, 0x0101ffff0101ffff, + 0xff01ffff01ff0101, 0x0101ffff01ff01ff, 0x0101ffff01ffff01, 0xff01ffff01ffffff, + 0xff01ffffff010101, 0x0101ffffff0101ff, 0x0101ffffff01ff01, 0xff01ffffff01ffff, + 0x0101ffffffff0101, 0xff01ffffffff01ff, 0xff01ffffffffff01, 0x0101ffffffffffff, + 0xffff010101010101, 0x01ff0101010101ff, 0x01ff01010101ff01, 0xffff01010101ffff, + 0x01ff010101ff0101, 0xffff010101ff01ff, 0xffff010101ffff01, 0x01ff010101ffffff, + 0x01ff0101ff010101, 0xffff0101ff0101ff, 0xffff0101ff01ff01, 0x01ff0101ff01ffff, + 0xffff0101ffff0101, 0x01ff0101ffff01ff, 0x01ff0101ffffff01, 0xffff0101ffffffff, + 0x01ff01ff01010101, 0xffff01ff010101ff, 0xffff01ff0101ff01, 0x01ff01ff0101ffff, + 0xffff01ff01ff0101, 0x01ff01ff01ff01ff, 0x01ff01ff01ffff01, 0xffff01ff01ffffff, + 0xffff01ffff010101, 0x01ff01ffff0101ff, 0x01ff01ffff01ff01, 0xffff01ffff01ffff, + 0x01ff01ffffff0101, 0xffff01ffffff01ff, 0xffff01ffffffff01, 0x01ff01ffffffffff, + 0x01ffff0101010101, 0xffffff01010101ff, 0xffffff010101ff01, 0x01ffff010101ffff, + 0xffffff0101ff0101, 0x01ffff0101ff01ff, 0x01ffff0101ffff01, 0xffffff0101ffffff, + 0xffffff01ff010101, 0x01ffff01ff0101ff, 0x01ffff01ff01ff01, 0xffffff01ff01ffff, + 0x01ffff01ffff0101, 0xffffff01ffff01ff, 0xffffff01ffffff01, 0x01ffff01ffffffff, + 0xffffffff01010101, 0x01ffffff010101ff, 0x01ffffff0101ff01, 0xffffffff0101ffff, + 0x01ffffff01ff0101, 0xffffffff01ff01ff, 0xffffffff01ffff01, 0x01ffffff01ffffff, + 0x01ffffffff010101, 0xffffffffff0101ff, 
0xffffffffff01ff01, 0x01ffffffff01ffff, + 0xffffffffffff0101, 0x01ffffffffff01ff, 0x01ffffffffffff01, 0xffffffffffffffff, +}; + +} + +bool iqk_mul_mat(long Nx, long Ny, long ne00, int typeA, const void * A, const void * B, + float * C, long stride_C, int ith, int nth) { + + MulMat mm; + int row_size_q8; + if (!MulMat::set_mul_mat(typeA, ne00, mm, row_size_q8, Ny)) { + return false; + } + + auto row_size_qx = ggml_row_size((ggml_type)typeA, ne00); + + auto nrc_x = (Nx + nth - 1)/nth; + auto first_x = ith*nrc_x; + if (first_x + nrc_x > Nx) nrc_x = Nx - first_x; + + DataInfo info{C + first_x, (const char *)B, (size_t)stride_C, (size_t)row_size_q8, 0, 1, nullptr, 0}; + + mm.mul_mat_NxM(ne00, (const char *)A + row_size_qx*first_x, row_size_qx, info, nrc_x, Ny); + + return true; +} + +bool iqk_mul_mat_moe(long Nx, long Ny, long ne00, int ne11, int typeA, const void * A, const void * B, + float * C, long nb1, long nb2, const void * vrow_mapping, int ith, int nth) { + const mmid_row_mapping * row_mapping = (const mmid_row_mapping *)vrow_mapping; + assert(row_mapping != nullptr); + + MulMat mm; + int row_size_q8; + if (!MulMat::set_mul_mat(typeA, ne00, mm, row_size_q8, Ny)) { + return false; + } + int row_size_qx = ggml_row_size((ggml_type)typeA, ne00); + int nrc_x = (Nx + nth - 1)/nth; + int first_x = ith*nrc_x; + if (first_x + nrc_x > Nx) nrc_x = Nx - first_x; + DataInfo info{C + first_x, (const char *)B, nb1/sizeof(float), (size_t)row_size_q8, 0, ne11, row_mapping, nb2/sizeof(float)}; + mm.mul_mat_NxM(ne00, (const char *)A + row_size_qx*first_x, row_size_qx, info, nrc_x, Ny); + return true; +} + +#if defined __x86_64__ + +#if defined HAVE_FANCY_SIMD + #undef HAVE_FANCY_SIMD +#endif +#if defined(__AVX512F__) && defined(__AVX512VNNI__) && defined(__AVX512VL__) && defined(__AVX512BW__) && defined(__AVX512DQ__) + #define HAVE_FANCY_SIMD +#endif + +namespace { + +inline float hsum_float_4(__m128 x) { + x = _mm_add_ps(x, _mm_movehl_ps(x, x)); + x = _mm_add_ss(x, _mm_movehdup_ps(x)); + return _mm_cvtss_f32(x); +} +inline float hsum_float_8(__m256 x) { + return hsum_float_4(_mm_add_ps(_mm256_castps256_ps128(x), _mm256_extractf128_ps(x, 1))); +} + +#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1) + + +template struct Q8 { + + constexpr static int nrc_y = nrc; + + Q8(const DataInfo& info) { + for (int iy = 0; iy < nrc_y; ++iy) y[iy] = (const block_q8 *)info.src1_row(iy); + } + +#ifdef HAVE_FANCY_SIMD + inline __m512i load_quants(int iy, int i, int j) const { return _mm512_loadu_si512((const __m512i*)y[iy][i].qs + j); } +#else + inline __m256i load_quants(int iy, int i, int j) const { return _mm256_loadu_si256((const __m256i*)y[iy][i].qs + j); } +#endif + inline __m256i load_bsums(int iy, int i) const { return _mm256_loadu_si256((const __m256i*)y[iy][i].bsums); } + inline float scale(int iy, int i) const { return y[iy][i].d; } + + const block_q8 * y[nrc_y]; +}; + +// Handles q4_K and q5_K scales/mins +struct Scales8K { + template + inline __m256i process_mins_and_scales(const uint8_t * data, float c, int i, const Q8& q8, __m256 * accd) { + make_q4_scales(data, utmp); + const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0])); + const __m128i mins128 = _mm256_extracti128_si256(mins_and_scales, 1); + accum_mins(mins128, q8, i, c, accd); + const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0); + return MM256_SET_M128I(sc128, sc128); + } +#ifdef HAVE_FANCY_SIMD + template + inline __m512i 
process_mins_and_scales_64(const uint8_t * data, float c, int i, const Q8& q8, __m256 * accd) { + auto scales = process_mins_and_scales(data, c, i, q8, accd); + return _mm512_inserti32x8(_mm512_castsi256_si512(scales), scales, 1); + } +#endif + template + inline void accum_mins(const __m128i& mins128, const Q8& q8, int i, float c, __m256 * accd) const { + const __m256i mins = MM256_SET_M128I(_mm_shuffle_epi8(mins128, shuffles[1]), _mm_shuffle_epi8(mins128, shuffles[0])); + for (int iy = 0; iy < Q8::nrc_y; ++iy) { + const __m256i q8s = q8.load_bsums(iy, i); + const __m256i prod = _mm256_madd_epi16(mins, q8s); + accd[iy] = _mm256_fmadd_ps(_mm256_set1_ps(c*q8.scale(iy, i)), _mm256_cvtepi32_ps(prod), accd[iy]); + } + } +#ifdef HAVE_FANCY_SIMD + const __m512i shuffles512[2] = { + _mm512_set_epi64(0x0706070607060706, 0x0302030203020302, 0x0706070607060706, 0x0302030203020302, + 0x0504050405040504, 0x0100010001000100, 0x0504050405040504, 0x0100010001000100), + _mm512_set_epi64(0x0f0e0f0e0f0e0f0e, 0x0b0a0b0a0b0a0b0a, 0x0f0e0f0e0f0e0f0e, 0x0b0a0b0a0b0a0b0a, + 0x0d0c0d0c0d0c0d0c, 0x0908090809080908, 0x0d0c0d0c0d0c0d0c, 0x0908090809080908) + }; +#endif + const __m128i shuffles[2] = {_mm_set_epi32(0x07060706, 0x05040504, 0x03020302, 0x01000100), + _mm_set_epi32(0x0f0e0f0e, 0x0d0c0d0c, 0x0b0a0b0a, 0x09080908)}; + + uint32_t utmp[4]; +}; + +template +inline void process_mins_16(const __m256i& all_scales, const Q8& q8, int i, float d, __m256 * accm) { + for (int iy = 0; iy < Q8::nrc_y; ++iy) { + const __m256i prod = _mm256_madd_epi16(all_scales, q8.load_bsums(iy, i)); + accm[iy] = _mm256_fmadd_ps(_mm256_set1_ps(d * q8.scale(iy, i)), _mm256_cvtepi32_ps(prod), accm[iy]); + } +} +inline void prepare_scales_16(const __m256i& all_scales, __m256i * scales) { + const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0); + const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1); + scales[0] = MM256_SET_M128I(l_scales, l_scales); + scales[1] = MM256_SET_M128I(h_scales, h_scales); +} + +struct ScaleQ3 { + inline __m128i make_scales(const uint16_t * s8) const { + const uint16_t * scales16 = (const uint16_t *)s8; + uint32_t aux0 = scales16[0] | (scales16[1] << 16); + uint32_t aux1 = scales16[2] | (scales16[3] << 16); + uint32_t aux2 = scales16[4] | (scales16[5] << 16); + __m128i scales128 = _mm_set_epi32( + ((aux1 >> 4) & 0x0f0f0f0f) | ((aux2 >> 2) & 0x30303030), + ((aux0 >> 4) & 0x0f0f0f0f) | ((aux2 >> 0) & 0x30303030), + (aux1 & 0x0f0f0f0f) | ((aux2 << 2) & 0x30303030), + (aux0 & 0x0f0f0f0f) | ((aux2 << 4) & 0x30303030)); + return _mm_add_epi8(scales128, m32); + } + const __m128i m32 = _mm_set1_epi8(-32); +}; + +struct ScaleIQ4XS { + inline __m128i make_scales(const uint32_t scales_l, const uint16_t scales_h) { + uint32_t tmp32 = scales_h | (scales_h << 14); + const __m128i sh = _mm_slli_epi16(_mm_and_si128(_mm_srlv_epi32(_mm_set1_epi32(tmp32), hshift), hmask), 4); + const __m128i sl = _mm_and_si128(_mm_srlv_epi32(_mm_set1_epi32(scales_l), lshift), lmask); + return _mm_add_epi16(_mm_or_si128(sh, _mm_cvtepi8_epi16(_mm_shuffle_epi8(sl, lshuffle))), m32); + } + const __m128i hshift = _mm_set_epi32(12, 8, 4, 0); + const __m128i lshift = _mm_set_epi32(4, 0, 4, 0); + const __m128i hmask = _mm_set1_epi16(0x03); + const __m128i lmask = _mm_set1_epi8(0xf); + const __m128i lshuffle = _mm_set_epi32(0x07030602, 0x05010400, 0x07030602, 0x05010400); + const __m128i m32 = _mm_set1_epi16(-32); +}; + +template +struct BaseDequantizer { + BaseDequantizer(const void * vx, size_t bx) : vx(vx), bx(bx) {} + inline void 
new_row(int ix) { + x = (const Block *)((const char *)vx + bx*ix); + } + + const void * vx; + size_t bx; + const Block * x; + + float d; +}; + +#ifdef HAVE_FANCY_SIMD +//====================================== Zen4 ================================================== + +struct BlockPermuter { + const __m512i permute1 = _mm512_set_epi64(11, 10, 9, 8, 3, 2, 1, 0); + const __m512i permute2 = _mm512_set_epi64(15, 14, 13, 12, 7, 6, 5, 4); +}; + +struct Q4Bits { + inline void prepare(const uint8_t * q4) { + auto q4bits = _mm512_loadu_si512((const __m512i*)q4 + 0); + auto tmp1 = _mm512_and_si512(q4bits, ml); + auto tmp2 = _mm512_and_si512(_mm512_srli_epi16(q4bits, 4), ml); + values[0] = _mm512_permutex2var_epi64(tmp1, perm.permute1, tmp2); + values[1] = _mm512_permutex2var_epi64(tmp1, perm.permute2, tmp2); + q4bits = _mm512_loadu_si512((const __m512i*)q4 + 1); + tmp1 = _mm512_and_si512(q4bits, ml); + tmp2 = _mm512_and_si512(_mm512_srli_epi16(q4bits, 4), ml); + values[2] = _mm512_permutex2var_epi64(tmp1, perm.permute1, tmp2); + values[3] = _mm512_permutex2var_epi64(tmp1, perm.permute2, tmp2); + } + inline void prepare64(const uint8_t * q4) { + auto q4bits = _mm512_loadu_si512((const __m512i*)q4 + 0); + values[0] = _mm512_and_si512(q4bits, ml); + values[1] = _mm512_and_si512(_mm512_srli_epi16(q4bits, 4), ml); + q4bits = _mm512_loadu_si512((const __m512i*)q4 + 1); + values[2] = _mm512_and_si512(q4bits, ml); + values[3] = _mm512_and_si512(_mm512_srli_epi16(q4bits, 4), ml); + } + __m512i values[4]; + const __m512i ml = _mm512_set1_epi8(0xf); + BlockPermuter perm; +}; + +struct Q2Bits { + inline void prepare(const uint8_t * q2) { + + auto q2bits = _mm512_loadu_si512((const __m512i*)q2); + auto tmp = _mm512_srli_epi16(q2bits, 2); + + values[0] = _mm512_permutex2var_epi64(q2bits, perm.permute1, tmp); + values[2] = _mm512_permutex2var_epi64(q2bits, perm.permute2, tmp); + values[1] = _mm512_and_si512(_mm512_srli_epi16(values[0], 4), ml); + values[3] = _mm512_and_si512(_mm512_srli_epi16(values[2], 4), ml); + values[0] = _mm512_and_si512(values[0], ml); + values[2] = _mm512_and_si512(values[2], ml); + } + __m512i values[4]; + const __m512i ml = _mm512_set1_epi8(0x03); + BlockPermuter perm; +}; + +struct DequantizerQ4K final : public BaseDequantizer { + DequantizerQ4K(const void * vx, size_t bx) : BaseDequantizer(vx, bx) {} + template + inline void new_block(int i, const Q8& q8, __m256 * accd, __m512i * scales) { + d = GGML_FP16_TO_FP32(x[i].d); + bits.prepare(x[i].qs); + auto all_scales = s8k.process_mins_and_scales_64(x[i].scales, -GGML_FP16_TO_FP32(x[i].dmin), i, q8, accd); + scales[0] = _mm512_shuffle_epi8(all_scales, s8k.shuffles512[0]); + scales[1] = _mm512_shuffle_epi8(all_scales, s8k.shuffles512[1]); + } + + Q4Bits bits; + Scales8K s8k; +}; + +struct DequantizerIQ4XS final : public BaseDequantizer { + DequantizerIQ4XS(const void * vx, size_t bx) : BaseDequantizer(vx, bx), values(load_values()) {} + template + inline void new_block(int i, const Q8& q8, __m256 * accd, __m512i * scales) { + d = GGML_FP16_TO_FP32(x[i].d); + prepare(x[i].qs); + auto scales128 = siq4.make_scales(*(const uint32_t *)x[i].scales_l, x[i].scales_h); + s8k.accum_mins(scales128, q8, i, -128.f*d, accd); + auto scales256 = MM256_SET_M128I(scales128, scales128); + auto all_scales = _mm512_inserti32x8(_mm512_castsi256_si512(scales256), scales256, 1); + scales[0] = _mm512_shuffle_epi8(all_scales, s8k.shuffles512[0]); + scales[1] = _mm512_shuffle_epi8(all_scales, s8k.shuffles512[1]); + } + static __m512i load_values() { + static const 
uint8_t kvalues_iq4nl[16] = {1, 24, 45, 63, 79, 93, 106, 118, 129, 141, 153, 166, 181, 197, 217, 241}; + auto val128 = _mm_loadu_si128((const __m128i *)kvalues_iq4nl); + auto val256 = MM256_SET_M128I(val128, val128); + return _mm512_inserti32x8(_mm512_castsi256_si512(val256), val256, 1); + } + inline void prepare(const uint8_t * q4) { + bits.prepare64(q4); + // We now have in bits.values[0]: 0...15, 32...47, 64...79, 96...111 + // bits.values[1]: 16..31, 48...63, 80...95, 112..127 + // etc. + auto tmp = _mm512_permutex2var_epi64(bits.values[0], permute1, bits.values[1]); + bits.values[1] = _mm512_shuffle_epi8(values, _mm512_permutex2var_epi64(bits.values[0], permute2, bits.values[1])); + bits.values[0] = _mm512_shuffle_epi8(values, tmp); + tmp = _mm512_permutex2var_epi64(bits.values[2], permute1, bits.values[3]); + bits.values[3] = _mm512_shuffle_epi8(values, _mm512_permutex2var_epi64(bits.values[2], permute2, bits.values[3])); + bits.values[2] = _mm512_shuffle_epi8(values, tmp); + } + + Q4Bits bits; + Scales8K s8k; + ScaleIQ4XS siq4; + const __m512i values; + const __m512i permute1 = _mm512_set_epi64(11, 10, 3, 2, 9, 8, 1, 0); + const __m512i permute2 = _mm512_set_epi64(15, 14, 7, 6, 13, 12, 5, 4); +}; + +struct HighBit5 { + inline void apply(const uint8_t * h, Q4Bits& bits) { + auto hbits256 = _mm256_loadu_si256((const __m256i *)h); + auto hbits = _mm512_inserti32x8(_mm512_castsi256_si512(hbits256), _mm256_srli_epi16(hbits256, 1), 1); + bits.values[0] = _mm512_or_si512(bits.values[0], _mm512_and_si512(_mm512_slli_epi16(hbits, 4), mh)); + bits.values[1] = _mm512_or_si512(bits.values[1], _mm512_and_si512(_mm512_slli_epi16(hbits, 2), mh)); + bits.values[2] = _mm512_or_si512(bits.values[2], _mm512_and_si512(hbits, mh)); + bits.values[3] = _mm512_or_si512(bits.values[3], _mm512_and_si512(_mm512_srli_epi16(hbits, 2), mh)); + } + const __m512i mh = _mm512_set1_epi8(0x10); +}; + +struct HighBit3 { + inline void apply(const uint8_t * h, Q2Bits& bits) { + auto hbits256 = _mm256_loadu_si256((const __m256i *)h); + auto hbits = _mm512_inserti32x8(_mm512_castsi256_si512(hbits256), _mm256_srli_epi16(hbits256, 1), 1); + bits.values[0] = _mm512_or_si512(bits.values[0], _mm512_and_si512(_mm512_slli_epi16(hbits, 2), mh)); + bits.values[1] = _mm512_or_si512(bits.values[1], _mm512_and_si512(hbits, mh)); + bits.values[2] = _mm512_or_si512(bits.values[2], _mm512_and_si512(_mm512_srli_epi16(hbits, 2), mh)); + bits.values[3] = _mm512_or_si512(bits.values[3], _mm512_and_si512(_mm512_srli_epi16(hbits, 4), mh)); + } + const __m512i mh = _mm512_set1_epi8(0x04); +}; + +struct DequantizerQ5K final : public BaseDequantizer { + DequantizerQ5K(const void * vx, size_t bx) : BaseDequantizer(vx, bx) {} + template + inline void new_block(int i, const Q8& q8, __m256 * accd, __m512i * scales) { + d = GGML_FP16_TO_FP32(x[i].d); + bits.prepare(x[i].qs); + hbits.apply(x[i].qh, bits); + auto all_scales = s8k.process_mins_and_scales_64(x[i].scales, -GGML_FP16_TO_FP32(x[i].dmin), i, q8, accd); + scales[0] = _mm512_shuffle_epi8(all_scales, s8k.shuffles512[0]); + scales[1] = _mm512_shuffle_epi8(all_scales, s8k.shuffles512[1]); + } + + Q4Bits bits; + HighBit5 hbits; + Scales8K s8k; +}; + +struct Scale16 { + inline void make_scales(const __m128i& scales8, __m512i * scales) const { + auto all_scales8 = MM256_SET_M128I(scales8, scales8); + auto scales1 = _mm256_shuffle_epi8(all_scales8, shuffle1); + auto scales2 = _mm256_shuffle_epi8(all_scales8, shuffle2); + scales[0] = _mm512_cvtepi8_epi16(scales1); + scales[1] =
_mm512_cvtepi8_epi16(scales2); + } + template + inline void process_mins_and_scales(int i, float c, const __m128i& mins8, const __m128i& scales8, + const Q8& q8, __m256 * accm, __m512i * scales) const { + process_mins_16(_mm256_cvtepi8_epi16(mins8), q8, i, c, accm); + make_scales(scales8, scales); + } + const __m256i shuffle1 = _mm256_set_epi32(0x07070707, 0x03030303, 0x06060606, 0x02020202, + 0x05050505, 0x01010101, 0x04040404, 0x00000000); + const __m256i shuffle2 = _mm256_set_epi32(0x0f0f0f0f, 0x0b0b0b0b, 0x0e0e0e0e, 0x0a0a0a0a, + 0x0d0d0d0d, 0x09090909, 0x0c0c0c0c, 0x08080808); +}; + +struct DequantizerQ2K final : public BaseDequantizer { + DequantizerQ2K(const void * vx, size_t bx) : BaseDequantizer(vx, bx) {} + template + inline void new_block(int i, const Q8& q8, __m256 * accm, __m512i * scales) { + d = GGML_FP16_TO_FP32(x[i].d); + bits.prepare(x[i].qs); + const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales); + const __m128i scales8 = _mm_and_si128(mins_and_scales, m4); + const __m128i mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4); + sc16.process_mins_and_scales(i, -GGML_FP16_TO_FP32(x[i].dmin), mins8, scales8, q8, accm, scales); + } + + Q2Bits bits; + Scale16 sc16; + const __m128i m4 = _mm_set1_epi8(0xf); + +}; + +struct DequantizerQ3K final : public BaseDequantizer { + DequantizerQ3K(const void * vx, size_t bx) : BaseDequantizer(vx, bx) {} + template + inline void new_block(int i, const Q8& q8, __m256 * accm, __m512i * scales) { + d = GGML_FP16_TO_FP32(x[i].d); + bits.prepare(x[i].qs); + hbits.apply(x[i].hmask, bits); + auto scales128 = sc3.make_scales((const uint16_t *)x[i].scales); + sc16.process_mins_and_scales(i, -4.f*d, scales128, scales128, q8, accm, scales); + } + + Q2Bits bits; + HighBit3 hbits; + ScaleQ3 sc3; + Scale16 sc16; + const __m128i m4 = _mm_set1_epi8(0xf); + const __m128i m32 = _mm_set1_epi8(-32); +}; + +struct DequantizerQ6K final : public BaseDequantizer { + DequantizerQ6K(const void * vx, size_t bx) : BaseDequantizer(vx, bx) {} + template + inline void new_block(int i, const Q8& q8, __m256 * accm, __m512i * scales) { + d = GGML_FP16_TO_FP32(x[i].d); + bits.prepare64(x[i].ql); + add_high_bits(x[i].qh, bits); + auto scales128 = _mm_loadu_si128((const __m128i *)x[i].scales); + sc16.process_mins_and_scales(i, -32.f*d, scales128, scales128, q8, accm, scales); + } + + inline void add_high_bits(const uint8_t * qh, Q4Bits& bits) const { + auto hbits = _mm512_loadu_si512((const __m512i *)qh); + auto tmp1 = _mm512_and_si512(_mm512_slli_epi16(hbits, 4), mh); + auto tmp2 = _mm512_and_si512(_mm512_slli_epi16(hbits, 2), mh); + bits.values[0] = _mm512_or_si512(bits.values[0], _mm512_permutex2var_epi64(tmp1, bits.perm.permute1, tmp2)); + bits.values[2] = _mm512_or_si512(bits.values[2], _mm512_permutex2var_epi64(tmp1, bits.perm.permute2, tmp2)); + tmp1 = _mm512_and_si512(hbits, mh); + tmp2 = _mm512_and_si512(_mm512_srli_epi16(hbits, 2), mh); + bits.values[1] = _mm512_or_si512(bits.values[1], _mm512_permutex2var_epi64(tmp1, bits.perm.permute1, tmp2)); + bits.values[3] = _mm512_or_si512(bits.values[3], _mm512_permutex2var_epi64(tmp1, bits.perm.permute2, tmp2)); + } + + Q4Bits bits; + HighBit3 hbits; + Scale16 sc16; + + const __m512i mh = _mm512_set1_epi8(0x30); + +}; + +template +static void mul_mat_qX_K_q8_K_T(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) { + assert(n % QK_K == 0); + const int nb = n / QK_K; + + Q8 q8(info); + + Dequantizer deq(vx, bx); + + __m256 accm[nrc_y]; + __m512 accd[nrc_y]; + __m512i 
scales[2]; + + for (int ix = 0; ix < nrc_x; ++ix) { + + for (int iy = 0; iy < nrc_y; ++iy) accd[iy] = _mm512_setzero_ps(); + for (int iy = 0; iy < nrc_y; ++iy) accm[iy] = _mm256_setzero_ps(); + + deq.new_row(ix); + + for (int i = 0; i < nb; ++i) { + + deq.new_block(i, q8, accm, scales); + + for (int iy = 0; iy < nrc_y; ++iy) { + const __m512i p1 = _mm512_dpbusd_epi32(_mm512_setzero_si512(), deq.bits.values[0], q8.load_quants(iy, i, 0)); + const __m512i p2 = _mm512_dpbusd_epi32(_mm512_setzero_si512(), deq.bits.values[1], q8.load_quants(iy, i, 1)); + const __m512i p3 = _mm512_dpbusd_epi32(_mm512_setzero_si512(), deq.bits.values[2], q8.load_quants(iy, i, 2)); + const __m512i p4 = _mm512_dpbusd_epi32(_mm512_setzero_si512(), deq.bits.values[3], q8.load_quants(iy, i, 3)); + auto sumi = _mm512_dpwssd_epi32(_mm512_setzero_si512(), scales[0], _mm512_packs_epi32(p1, p2)); + sumi = _mm512_dpwssd_epi32(sumi, scales[1], _mm512_packs_epi32(p3, p4)); + accd[iy] = _mm512_fmadd_ps(_mm512_set1_ps(deq.d*q8.scale(iy, i)), _mm512_cvtepi32_ps(sumi), accd[iy]); + } + + } + + for (int iy = 0; iy < nrc_y; ++iy) { + auto sum256 = _mm256_add_ps(_mm512_castps512_ps256(accd[iy]), _mm512_extractf32x8_ps(accd[iy], 1)); + info.store(ix, iy, hsum_float_8(_mm256_add_ps(accm[iy], sum256))); + } + + } +} + +#else +// ===================================== Vanilla AVX2 ===================================== + +struct Q4Bits { + inline void prepare(const uint8_t * q4, int j) { + auto q4bits = _mm256_loadu_si256((const __m256i*)q4 + 2*j+0); + values[0] = _mm256_and_si256(q4bits, ml); + values[1] = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), ml); + q4bits = _mm256_loadu_si256((const __m256i*)q4 + 2*j+1); + values[2] = _mm256_and_si256(q4bits, ml); + values[3] = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), ml); + } + inline void prepare64(const uint8_t * q4, int j) { + auto q4bits = _mm256_loadu_si256((const __m256i*)q4 + 2*j+0); + values[0] = _mm256_and_si256(q4bits, ml); + values[2] = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), ml); + q4bits = _mm256_loadu_si256((const __m256i*)q4 + 2*j+1); + values[1] = _mm256_and_si256(q4bits, ml); + values[3] = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), ml); + } + inline void prepare16(const uint8_t * q4, int j) { + values[0] = dequant16(q4 + 64*j + 0); + values[1] = dequant16(q4 + 64*j + 16); + values[2] = dequant16(q4 + 64*j + 32); + values[3] = dequant16(q4 + 64*j + 48); + } + inline __m256i dequant16(const uint8_t * qs) const { + const __m128i aux128 = _mm_loadu_si128((const __m128i *)qs); + const __m256i aux256 = MM256_SET_M128I(_mm_srli_epi16(aux128, 4), aux128); + return _mm256_and_si256(ml, aux256); + }; + __m256i values[4]; + const __m256i ml = _mm256_set1_epi8(0xf); +}; + +struct Q2Bits { + inline void prepare(const uint8_t * q2, int j) { + auto q2bits = _mm256_loadu_si256((const __m256i *)q2 + j); + values[0] = _mm256_and_si256(q2bits, ml); + values[1] = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), ml); + values[2] = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), ml); + values[3] = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), ml); + } + __m256i values[4]; + const __m256i ml = _mm256_set1_epi8(0x03); +}; + +struct HighBit5 { + inline void load(const uint8_t * h) { hbits = _mm256_loadu_si256((const __m256i *)h); } + inline void apply(Q4Bits& bits, bool do_shift) { + bits.values[0] = _mm256_or_si256(bits.values[0], _mm256_and_si256(_mm256_slli_epi16(hbits, 4), mh)); + bits.values[1] = _mm256_or_si256(bits.values[1], _mm256_and_si256(_mm256_slli_epi16(hbits, 3), mh)); + 
bits.values[2] = _mm256_or_si256(bits.values[2], _mm256_and_si256(_mm256_slli_epi16(hbits, 2), mh)); + bits.values[3] = _mm256_or_si256(bits.values[3], _mm256_and_si256(_mm256_slli_epi16(hbits, 1), mh)); + if (do_shift) { + hbits = _mm256_srli_epi16(hbits, 4); + } + } + const __m256i mh = _mm256_set1_epi8(0x10); + __m256i hbits; +}; + +struct HighBit3 { + inline void load(const uint8_t * h) { hbits = _mm256_loadu_si256((const __m256i *)h); } + inline void apply(Q2Bits& bits, bool do_shift) { + bits.values[0] = _mm256_or_si256(bits.values[0], _mm256_and_si256(_mm256_slli_epi16(hbits, 2), mh)); + bits.values[1] = _mm256_or_si256(bits.values[1], _mm256_and_si256(_mm256_slli_epi16(hbits, 1), mh)); + bits.values[2] = _mm256_or_si256(bits.values[2], _mm256_and_si256(hbits, mh)); + bits.values[3] = _mm256_or_si256(bits.values[3], _mm256_and_si256(_mm256_srli_epi16(hbits, 1), mh)); + if (do_shift) { + hbits = _mm256_srli_epi16(hbits, 4); + } + } + const __m256i mh = _mm256_set1_epi8(0x04); + __m256i hbits; +}; + +inline __m256i get_scale_shuffle_8(int i) { + return _mm256_set1_epi16((2*i) | ((2*i+1) << 8)); +} + +inline void set_scales_8(const __m256i& all_scales, int j, __m256i * scales) { + scales[0] = _mm256_shuffle_epi8(all_scales, get_scale_shuffle_8(4*j+0)); + scales[1] = _mm256_shuffle_epi8(all_scales, get_scale_shuffle_8(4*j+1)); + scales[2] = _mm256_shuffle_epi8(all_scales, get_scale_shuffle_8(4*j+2)); + scales[3] = _mm256_shuffle_epi8(all_scales, get_scale_shuffle_8(4*j+3)); +} + +template +inline void multiply_add(const Bits& bits, const __m256i * scales, int j, int i, const Q8& q8, __m256i * sumi) { + if (j == 0) { + for (int iy = 0; iy < Q8::nrc_y; ++iy) { + const __m256i p1 = _mm256_madd_epi16(scales[0], _mm256_maddubs_epi16(bits.values[0], q8.load_quants(iy, i, 0))); + const __m256i p2 = _mm256_madd_epi16(scales[1], _mm256_maddubs_epi16(bits.values[1], q8.load_quants(iy, i, 1))); + const __m256i p3 = _mm256_madd_epi16(scales[2], _mm256_maddubs_epi16(bits.values[2], q8.load_quants(iy, i, 2))); + const __m256i p4 = _mm256_madd_epi16(scales[3], _mm256_maddubs_epi16(bits.values[3], q8.load_quants(iy, i, 3))); + sumi[iy] = _mm256_add_epi32(_mm256_add_epi32(p1, p3), _mm256_add_epi32(p2, p4)); + } + } else { + for (int iy = 0; iy < Q8::nrc_y; ++iy) { + const __m256i p1 = _mm256_madd_epi16(scales[0], _mm256_maddubs_epi16(bits.values[0], q8.load_quants(iy, i, 4))); + const __m256i p2 = _mm256_madd_epi16(scales[1], _mm256_maddubs_epi16(bits.values[1], q8.load_quants(iy, i, 5))); + const __m256i p3 = _mm256_madd_epi16(scales[2], _mm256_maddubs_epi16(bits.values[2], q8.load_quants(iy, i, 6))); + const __m256i p4 = _mm256_madd_epi16(scales[3], _mm256_maddubs_epi16(bits.values[3], q8.load_quants(iy, i, 7))); + sumi[iy] = _mm256_add_epi32(sumi[iy], _mm256_add_epi32(p1, p3)); + sumi[iy] = _mm256_add_epi32(sumi[iy], _mm256_add_epi32(p2, p4)); + } + } +} + +struct DequantizerQ4K final : public BaseDequantizer { + DequantizerQ4K(const void * vx, size_t bx) : BaseDequantizer(vx, bx) {} + template + inline __m256i new_block(int i, const Q8& q8, __m256 * accd) { + d = GGML_FP16_TO_FP32(x[i].d); + return s8k.process_mins_and_scales(x[i].scales, -GGML_FP16_TO_FP32(x[i].dmin), i, q8, accd); + } + inline void prepare(int i, int j) { + bits.prepare(x[i].qs, j); + } + + Q4Bits bits; + Scales8K s8k; +}; + +struct DequantizerIQ4XS final : public BaseDequantizer { + DequantizerIQ4XS(const void * vx, size_t bx) : BaseDequantizer(vx, bx), values(load_values()) {} + template + inline __m256i new_block(int i, const 
Q8& q8, __m256 * accd) { + d = GGML_FP16_TO_FP32(x[i].d); + auto scales128 = siq4.make_scales(*(const uint32_t *)x[i].scales_l, x[i].scales_h); + s8k.accum_mins(scales128, q8, i, -128.f*d, accd); + return MM256_SET_M128I(scales128, scales128); + } + inline void prepare(int i, int j) { + bits.prepare16(x[i].qs, j); + bits.values[0] = _mm256_shuffle_epi8(values, bits.values[0]); + bits.values[1] = _mm256_shuffle_epi8(values, bits.values[1]); + bits.values[2] = _mm256_shuffle_epi8(values, bits.values[2]); + bits.values[3] = _mm256_shuffle_epi8(values, bits.values[3]); + } + + static __m256i load_values() { + static const uint8_t kvalues_iq4nl[16] = {1, 24, 45, 63, 79, 93, 106, 118, 129, 141, 153, 166, 181, 197, 217, 241}; + auto val128 = _mm_loadu_si128((const __m128i *)kvalues_iq4nl); + return MM256_SET_M128I(val128, val128); + } + + Q4Bits bits; + Scales8K s8k; + ScaleIQ4XS siq4; + const __m256i values; +}; + +struct DequantizerQ5K final : public BaseDequantizer { + DequantizerQ5K(const void * vx, size_t bx) : BaseDequantizer(vx, bx) {} + template + inline __m256i new_block(int i, const Q8& q8, __m256 * accd) { + d = GGML_FP16_TO_FP32(x[i].d); + hbits.load(x[i].qh); + return s8k.process_mins_and_scales(x[i].scales, -GGML_FP16_TO_FP32(x[i].dmin), i, q8, accd); + } + inline void prepare(int i, int j) { + bits.prepare(x[i].qs, j); + hbits.apply(bits, j == 0); + } + + Q4Bits bits; + HighBit5 hbits; + Scales8K s8k; +}; + +template +inline void process_mins_and_scales_16(const __m128i& scales128, const Q8& q8, int i, float d, + __m256 * accm, __m256i * scales) { + const __m256i all_scales = _mm256_cvtepi8_epi16(scales128); + process_mins_16(all_scales, q8, i, d, accm); + prepare_scales_16(all_scales, scales); +} + +struct DequantizerQ3K final : public BaseDequantizer { + DequantizerQ3K(const void * vx, size_t bx) : BaseDequantizer(vx, bx) {} + + template + inline void new_block(int i, const Q8& q8, __m256 * accm, __m256i * scales) { + d = GGML_FP16_TO_FP32(x[i].d); + hbits.load(x[i].hmask); + process_mins_and_scales_16(sc3.make_scales((const uint16_t *)x[i].scales), q8, i, -4.f*d, accm, scales); + } + inline void prepare(int i, int j) { + bits.prepare(x[i].qs, j); + hbits.apply(bits, j == 0); + } + + Q2Bits bits; + HighBit3 hbits; + ScaleQ3 sc3; + + const __m128i m32 = _mm_set1_epi8(-32); +}; + +struct DequantizerQ2K final : public BaseDequantizer { + DequantizerQ2K(const void * vx, size_t bx) : BaseDequantizer(vx, bx) {} + + template + inline void new_block(int i, const Q8& q8, __m256 * accm, __m256i * scales) { + d = GGML_FP16_TO_FP32(x[i].d); + const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales); + const __m128i scales8 = _mm_and_si128(mins_and_scales, m4); + const __m128i mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4); + process_mins_16(_mm256_cvtepi8_epi16(mins8), q8, i, -GGML_FP16_TO_FP32(x[i].dmin), accm); + prepare_scales_16(_mm256_cvtepi8_epi16(scales8), scales); + } + inline void prepare(int i, int j) { + bits.prepare(x[i].qs, j); + } + + Q2Bits bits; + + const __m128i m4 = _mm_set1_epi8(0xf); +}; + +struct DequantizerQ6K final : public BaseDequantizer { + DequantizerQ6K(const void * vx, size_t bx) : BaseDequantizer(vx, bx) {} + template + inline void new_block(int i, const Q8& q8, __m256 * accm, __m256i * scales) { + d = GGML_FP16_TO_FP32(x[i].d); + process_mins_and_scales_16(_mm_loadu_si128((const __m128i *)x[i].scales), q8, i, -32.f*d, accm, scales); + } + inline void prepare(int i, int j) { + bits.prepare64(x[i].ql, j); + auto hbits = 
_mm256_loadu_si256((const __m256i *)x[i].qh + j); + bits.values[0] = _mm256_or_si256(bits.values[0], _mm256_and_si256(_mm256_slli_epi16(hbits, 4), mh)); + bits.values[1] = _mm256_or_si256(bits.values[1], _mm256_and_si256(_mm256_slli_epi16(hbits, 2), mh)); + bits.values[2] = _mm256_or_si256(bits.values[2], _mm256_and_si256(hbits, mh)); + bits.values[3] = _mm256_or_si256(bits.values[3], _mm256_and_si256(_mm256_srli_epi16(hbits, 2), mh)); + } + + Q4Bits bits; + const __m256i mh = _mm256_set1_epi8(0x30); +}; + +inline __m256i get_scale_shuffle_16(int i) { + static const uint8_t k_shuffle[128] = { + 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, + 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, + 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, + 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15, + }; + return _mm256_loadu_si256((const __m256i*)k_shuffle + i); +} + +inline void set_scales_16(const __m256i& all_scales, __m256i * scales) { + scales[0] = _mm256_shuffle_epi8(all_scales, get_scale_shuffle_16(0)); + scales[1] = _mm256_shuffle_epi8(all_scales, get_scale_shuffle_16(1)); + scales[2] = _mm256_shuffle_epi8(all_scales, get_scale_shuffle_16(2)); + scales[3] = _mm256_shuffle_epi8(all_scales, get_scale_shuffle_16(3)); +} + +template +static void mul_mat_qY_K_q8_K_T(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) { + assert(n%QK_K == 0); + const int nb = n/QK_K; + + Q8 q8(info); + + __m256i all_scales[2]; + __m256i scales[4]; + __m256 accd[nrc_y]; + + Dequantizer deq(vx, bx); + + for (int ix = 0; ix < nrc_x; ++ix) { + + deq.new_row(ix); + + for (int iy = 0; iy < nrc_y; ++iy) accd[iy] = _mm256_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + + deq.new_block(i, q8, accd, all_scales); + + __m256i sumi[nrc_y]; + + for (int j = 0; j < QK_K/128; ++j) { + deq.prepare(i, j); + set_scales_16(all_scales[j], scales); + multiply_add(deq.bits, scales, j, i, q8, sumi); + } + + for (int iy = 0; iy < nrc_y; ++iy) { + accd[iy] = _mm256_fmadd_ps(_mm256_set1_ps(deq.d*q8.scale(iy, i)), _mm256_cvtepi32_ps(sumi[iy]), accd[iy]); + } + + } + + for (int iy = 0; iy < nrc_y; ++iy) { + info.store(ix, iy, hsum_float_8(accd[iy])); + } + + } + +} + +template +static void mul_mat_qX_K_q8_K_T(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) { + assert(n % QK_K == 0); + const int nb = n / QK_K; + + Q8 q8(info); + + Dequantizer deq(vx, bx); + + __m256 accd[nrc_y]; + __m256i scales[4]; + + for (int ix = 0; ix < nrc_x; ++ix) { + + for (int iy = 0; iy < nrc_y; ++iy) accd[iy] = _mm256_setzero_ps(); + + deq.new_row(ix); + + for (int i = 0; i < nb; ++i) { + + auto all_scales = deq.new_block(i, q8, accd); + + __m256i sumi[nrc_y]; + + for (int j = 0; j < QK_K/128; ++j) { + + deq.prepare(i, j); + + set_scales_8(all_scales, j, scales); + + multiply_add(deq.bits, scales, j, i, q8, sumi); + + } + + for (int iy = 0; iy < nrc_y; ++iy) { + const __m256 vd = _mm256_set1_ps(deq.d*q8.scale(iy, i)); + accd[iy] = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi[iy]), accd[iy]); + } + + } + + for (int iy = 0; iy < nrc_y; ++iy) { + info.store(ix, iy, hsum_float_8(accd[iy])); + } + + } +} +#endif // Zen4 or vanilla AVX2 + +// +// ============================== Legacy quants +// + +struct DotHelper { + const __m256i m1 = _mm256_set1_epi16(1); +#if defined(__AVX512VNNI__) && defined(__AVX512VL__) + inline __m256i dot(__m256i x, 
__m256i y) const { + return _mm256_dpbusd_epi32(_mm256_setzero_si256(), x, y); + } +#else + inline __m256i dot(__m256i x, __m256i y) const { + return _mm256_madd_epi16(m1, _mm256_maddubs_epi16(x, y)); + } +#endif +}; + +struct SignedDot { + DotHelper helper; + inline __m256i compute(__m256i x, __m256i y) const { + return helper.dot(_mm256_sign_epi8(x, x), _mm256_sign_epi8(y, x)); + } +}; +struct UnsignedDot { + DotHelper helper; + inline __m256i compute(__m256i x, __m256i y) const { + return helper.dot(x, y); + } +}; +template struct Sum4 { + Dot dot; + inline __m256i compute(const __m256i * qx, const Q8 * y) const { + const __m256i p0 = dot.compute(qx[0], _mm256_loadu_si256((const __m256i *)y[0].qs)); + const __m256i p1 = dot.compute(qx[1], _mm256_loadu_si256((const __m256i *)y[1].qs)); + const __m256i p2 = dot.compute(qx[2], _mm256_loadu_si256((const __m256i *)y[2].qs)); + const __m256i p3 = dot.compute(qx[3], _mm256_loadu_si256((const __m256i *)y[3].qs)); + const __m256i p01 = _mm256_madd_epi16(dot.helper.m1, _mm256_packs_epi32(p0, p1)); // 0,0, 1,1, 0,0, 1,1 + const __m256i p23 = _mm256_madd_epi16(dot.helper.m1, _mm256_packs_epi32(p2, p3)); // 2,2, 3,3, 2,2, 3,3 + return _mm256_madd_epi16(dot.helper.m1, _mm256_packs_epi32(p01, p23)); // 0,1,2,3, 0,1,2,3 + } +}; + +struct Sum4_Q8 { + SignedDot dot; + static inline __m256i add1(__m256i a, __m256i b) { + return _mm256_add_epi32(_mm256_unpacklo_epi32(a, b), _mm256_unpackhi_epi32(a, b)); + } + static inline __m256i add2(__m256i a, __m256i b) { + return _mm256_add_epi32(_mm256_unpacklo_epi64(a, b), _mm256_unpackhi_epi64(a, b)); + } + inline __m256i compute(const __m256i * qx, const block_q8_0 * y) const { + const __m256i p0 = dot.compute(qx[0], _mm256_loadu_si256((const __m256i *)y[0].qs)); + const __m256i p1 = dot.compute(qx[1], _mm256_loadu_si256((const __m256i *)y[1].qs)); + const __m256i p2 = dot.compute(qx[2], _mm256_loadu_si256((const __m256i *)y[2].qs)); + const __m256i p3 = dot.compute(qx[3], _mm256_loadu_si256((const __m256i *)y[3].qs)); + const __m256i p01 = add1(p0, p1); // 0,1, 0,1, 0,1, 0,1 + const __m256i p23 = add1(p2, p3); // 2,3, 2,3, 2,3, 2,3 + return add2(p01, p23); // returns 0,1,2,3, 0,1,2,3 + } +}; + +struct ScaleHelperQ_0 { + ggml_half scales8[4]; + template + inline __m128 prepare4(const Q * y) { + for (int j = 0; j < 4; ++j) scales8[j] = y[j].d; + return _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)scales8)); + } + template + inline __m128 prepare4(__m128 other_scales, const Q * y) { + return _mm_mul_ps(other_scales, prepare4(y)); + } + template inline float prepare1(const Q * y) const { return GGML_FP16_TO_FP32(y->d); } + template inline float prepare1(float d, const Q * y) const { return d*prepare1(y); } +}; + +struct ScaleHelperQ_1 { + uint32_t scales8[4]; + const __m128i shuffle = _mm_set_epi16(0x0f0e, 0x0b0a, 0x0706, 0x0302, 0x0d0c, 0x0908, 0x0504, 0x0100); + + template + inline __m256 prepare4(const Q * y) { + for (int j = 0; j < 4; ++j) { + // it is slightly faster to directly dereference (const uint32 *)&y[j].d, but some compilers + // complain that this breaks strict-aliasing rules. 
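+ // A sketch of the rejected spelling (undefined behavior under strict aliasing):
+ //     scales8[j] = *(const uint32_t *)&y[j].d;
+ // The memcpy below is the well-defined equivalent and typically compiles to
+ // the same single 4-byte load.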
+ memcpy(scales8 + j, &y[j].d, sizeof(uint32_t)); + } + return _mm256_cvtph_ps(_mm_shuffle_epi8(_mm_loadu_si128((const __m128i *)scales8), shuffle)); + } + + template + inline __m256 prepare4(__m256 other_scales, const Q * y) { + return _mm256_mul_ps(other_scales, prepare4(y)); + } + + template inline std::pair prepare1(const Q * y) const { + return std::make_pair(GGML_FP16_TO_FP32(y->d), GGML_FP16_TO_FP32(y->m)); + } + template inline std::pair prepare1(const std::pair& dm, const Q * y) const { + return std::make_pair(dm.first*GGML_FP16_TO_FP32(y->d), dm.second*GGML_FP16_TO_FP32(y->m)); + } + std::pair inline prepare1(const std::pair& dm, const block_q8_1 * y) const { + return std::make_pair(dm.first*GGML_FP16_TO_FP32(y->d), dm.second*GGML_FP16_TO_FP32(y->s)); + } +}; + +struct MinusType0 { + inline __m256 compute(__m128 d, int) const { return _mm256_set_m128(d, d); } + inline float compute(float d, int) const { return d; } + inline float result(__m256 acc, int) const { return hsum_float_8(acc); } +}; + +template struct MinusType1 { + __m128 accm[nrc_y]; + MinusType1() { for (int iy = 0; iy < nrc_y; ++iy) accm[iy] = _mm_setzero_ps(); } + inline __m256 compute(__m256 dm, int iy) { + const __m128 d = _mm256_castps256_ps128(dm); + const __m128 m = _mm256_extractf128_ps(dm, 1); + accm[iy] = _mm_add_ps(accm[iy], m); + return _mm256_set_m128(d, d); + } + inline float compute(const std::pair& dm, int iy) { + accm[iy] = _mm_add_ps(accm[iy], _mm_set1_ps(dm.second*0.25f)); + return dm.first; + } + inline float result(__m256 acc, int iy) const { + const __m128 sum = _mm_add_ps(_mm256_castps256_ps128(acc), _mm256_extractf128_ps(acc, 1)); + return hsum_float_4(_mm_add_ps(sum, accm[iy])); + } +}; + +template struct AccumT { + __m256 acc[nrc_y]; + Minus accm; + AccumT() { for (int iy = 0; iy < nrc_y; ++iy) acc[iy] = _mm256_setzero_ps(); } + template + inline void compute(int nb, Unpacker& unp, Scales& scales, Sum& sum, const Q8 ** y, const DataInfo& info, int ix) { + auto qx = unp.quants(); + __m256 dall[nrc_y]; + for (int i = 0; i < nb/4; ++i) { + auto other_scales = unp.set_block_4(i); + for (int iy = 0; iy < nrc_y; ++iy) { + auto s12 = scales.prepare4(other_scales, y[iy] + 4*i); + dall[iy] = accm.compute(s12, iy); + } + for (int iy = 0; iy < nrc_y; ++iy) { + auto pall = sum.compute(qx, y[iy] + 4*i); + acc[iy] = _mm256_fmadd_ps(dall[iy], _mm256_cvtepi32_ps(pall), acc[iy]); + } + } + if (!is_multiple_of_4) { + for (int i = 4*(nb/4); i < nb; ++i) { + auto other_scales = unp.set_block(i); + for (int iy = 0; iy < nrc_y; ++iy) { + auto s12 = scales.prepare1(other_scales, y[iy] + i); + auto d = accm.compute(s12, iy); + const __m256i p0 = sum.dot.compute(qx[0], _mm256_loadu_si256((const __m256i *)y[iy][i].qs)); + acc[iy] = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(p0), acc[iy]); + } + } + } + for (int iy = 0; iy < nrc_y; ++iy) { + info.store(ix, iy, accm.result(acc[iy], iy)); + //s[iy*bs] = accm.result(acc[iy], iy); + } + } +}; + +template +using AccumType0 = AccumT; + +template +using AccumType1 = AccumT, nrc_y, is_multiple_of_4>; + +using Sum4Type0 = Sum4; +using Sum4Type1 = Sum4; + +template +void mul_mat_qX_q8_Helper(int nb, const void * vx, size_t bx, const DataInfo& info, const Q8 ** y, int nrc_x) { + Unpacker unp(vx, bx); + Sum4Type sum4; + Scales scales; + for (int ix = 0; ix < nrc_x; ++ix) { + unp.set_row(ix); + AccumType accum; + accum.compute(nb, unp, scales, sum4, y, info, ix); + } +} + +template +void mul_mat_qX_0_q8_0_T(int n, const void * vx, size_t bx, const DataInfo& info, int 
nrc_x) { + assert(n%Unpacker::block_size() == 0); + Q8 q8(info); + int nb = n/Unpacker::block_size(); + if (nb%4 == 0) { + mul_mat_qX_q8_Helper, ScaleHelperQ_0, block_q8_0, nrc_y>( + nb, vx, bx, info, q8.y, nrc_x + ); + } else { + mul_mat_qX_q8_Helper, ScaleHelperQ_0, block_q8_0, nrc_y>( + nb, vx, bx, info, q8.y, nrc_x + ); + } +} + +template +void mul_mat_qX_1_q8_1_T(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) { + assert(n%Unpacker::block_size() == 0); + Q8 q8(info); + int nb = n/Unpacker::block_size(); + if (nb%4 == 0) { + mul_mat_qX_q8_Helper, ScaleHelperQ_1, block_q8_1, nrc_y>( + nb, vx, bx, info, q8.y, nrc_x + ); + } else { + mul_mat_qX_q8_Helper, ScaleHelperQ_1, block_q8_1, nrc_y>( + nb, vx, bx, info, q8.y, nrc_x + ); + } +} + +struct Dequantizer4bit { + const __m256i m4 = _mm256_set1_epi8(0xf); + inline __m256i dequant(const uint8_t * qs) const { + const __m128i aux128 = _mm_loadu_si128((const __m128i *)qs); + return _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(aux128, 4), aux128), m4); + } +}; + +struct Q8_0_Dequantizer { + inline __m256i dequant(const block_q8_0 * x) const { + return _mm256_loadu_si256((const __m256i *)x->qs); + } +}; + +struct Q4_0_Dequantizer { + Dequantizer4bit b4; + const __m256i m8 = _mm256_set1_epi8(-8); + inline __m256i dequant(const block_q4_0 * x) const { + return _mm256_add_epi8(b4.dequant(x->qs), m8); + } +}; + +struct Q4_1_Dequantizer { + Dequantizer4bit b4; + inline __m256i dequant(const block_q4_1 * x) const { + return b4.dequant(x->qs); + } +}; + +struct HBitDequantizer { + const __m256i shuffle = _mm256_set_epi64x(0x0303030303030303, 0x0202020202020202, 0x0101010101010101, 0x0000000000000000); + const __m256i mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe); + const __m256i minus1 = _mm256_set1_epi64x(-1); + inline __m256i to_bytes(const uint8_t * bits) const { + // Note: Data in all ggml quants is at least 2-byte aligned. 
+ // => we can cast to uint16_t and use or on two consecutive entries + // which is faster than memcpy + const uint16_t * aux16 = (const uint16_t *)bits; + const uint32_t aux32 = aux16[0] | (aux16[1] << 16); + //uint32_t aux32; memcpy(&aux32, bits, sizeof(uint32_t)); + __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(aux32), shuffle); + bytes = _mm256_or_si256(bytes, mask); + return _mm256_cmpeq_epi8(bytes, minus1); + } +}; + +struct Q5_0_Dequantizer { + Dequantizer4bit b4; + HBitDequantizer hbit; + const __m256i mh = _mm256_set1_epi8((char)0xF0); + inline __m256i dequant(const block_q5_0 * x) const { + const __m256i vqh = _mm256_andnot_si256(hbit.to_bytes(x->qh), mh); + return _mm256_or_si256(b4.dequant(x->qs), vqh); + } +}; + +struct Q5_1_Dequantizer { + Dequantizer4bit b4; + HBitDequantizer hbit; + const __m256i mh = _mm256_set1_epi8(0x10); + inline __m256i dequant(const block_q5_1 * x) const { + const __m256i vqh = _mm256_and_si256(hbit.to_bytes(x->qh), mh); + return _mm256_or_si256(b4.dequant(x->qs), vqh); + } +}; + +template +struct Q_Unpacker { + Q_Unpacker(const void * vx, size_t bx) : cx_0((const char *)vx), x((const Q*)cx_0), bx(bx) {} + + const char * cx_0; + const Q * x; + size_t bx; + + Scales scales; + Dequantizer deq; + + __m256i qx[4]; + + inline const __m256i* quants() const { return qx; } + + inline void set_row(int ix) { x = (const Q*)(cx_0 + ix*bx); } + + inline auto set_block_4(int i) { + for (int j = 0; j < 4; ++j) { + qx[j] = deq.dequant(x + 4*i + j); + } + return scales.prepare4(x + 4*i); + } + inline auto set_block(int i) { + qx[0] = deq.dequant(x + i); + return scales.prepare1(x + i); + } +}; + +struct Q8_0_Unpacker final : public Q_Unpacker { + Q8_0_Unpacker(const void * vx, size_t bx) : Q_Unpacker(vx, bx) {} + inline static int block_size() { return QK4_0; } +}; +struct Q4_0_Unpacker final : public Q_Unpacker { + Q4_0_Unpacker(const void * vx, size_t bx) : Q_Unpacker(vx, bx) {} + inline static int block_size() { return QK4_0; } +}; +struct Q5_0_Unpacker final : public Q_Unpacker { + Q5_0_Unpacker(const void * vx, size_t bx) : Q_Unpacker(vx, bx) {} + inline static int block_size() { return QK5_0; } +}; +struct Q4_1_Unpacker final : public Q_Unpacker { + Q4_1_Unpacker(const void * vx, size_t bx) : Q_Unpacker(vx, bx) {} + inline static int block_size() { return QK4_1; } +}; +struct Q5_1_Unpacker final : public Q_Unpacker { + Q5_1_Unpacker(const void * vx, size_t bx) : Q_Unpacker(vx, bx) {} + inline static int block_size() { return QK4_1; } +}; + +template +void mul_mat_q8_0_q8_0_T(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) { + assert(n%Q8_0_Unpacker::block_size() == 0); + Q8 q8(info); + int nb = n/Q8_0_Unpacker::block_size(); + if (nb%4 == 0) { + mul_mat_qX_q8_Helper, ScaleHelperQ_0, block_q8_0, nrc_y>( + nb, vx, bx, info, q8.y, nrc_x + ); + } else { + mul_mat_qX_q8_Helper, ScaleHelperQ_0, block_q8_0, nrc_y>( + nb, vx, bx, info, q8.y, nrc_x + ); + } +} + +template void MulMat::set_functions(MulMat& m) { + if constexpr (std::is_same_v || std::is_same_v) { + m.funcs[0] = mul_mat_qX_0_q8_0_T; + m.funcs[1] = mul_mat_qX_0_q8_0_T; + m.funcs[2] = mul_mat_qX_0_q8_0_T; + m.funcs[3] = mul_mat_qX_0_q8_0_T; + m.funcs[4] = mul_mat_qX_0_q8_0_T; + m.funcs[5] = mul_mat_qX_0_q8_0_T; + m.funcs[6] = mul_mat_qX_0_q8_0_T; + m.funcs[7] = mul_mat_qX_0_q8_0_T; + } + else if constexpr (std::is_same_v || std::is_same_v) { + m.funcs[0] = mul_mat_qX_1_q8_1_T; + m.funcs[1] = mul_mat_qX_1_q8_1_T; + m.funcs[2] = mul_mat_qX_1_q8_1_T; + m.funcs[3] = 
mul_mat_qX_1_q8_1_T; + m.funcs[4] = mul_mat_qX_1_q8_1_T; + m.funcs[5] = mul_mat_qX_1_q8_1_T; + m.funcs[6] = mul_mat_qX_1_q8_1_T; + m.funcs[7] = mul_mat_qX_1_q8_1_T; + } + else { +#ifdef HAVE_FANCY_SIMD + m.funcs[0] = mul_mat_qX_K_q8_K_T; + m.funcs[1] = mul_mat_qX_K_q8_K_T; + m.funcs[2] = mul_mat_qX_K_q8_K_T; + m.funcs[3] = mul_mat_qX_K_q8_K_T; + m.funcs[4] = mul_mat_qX_K_q8_K_T; + m.funcs[5] = mul_mat_qX_K_q8_K_T; + m.funcs[6] = mul_mat_qX_K_q8_K_T; + m.funcs[7] = mul_mat_qX_K_q8_K_T; +#else + if constexpr (std::is_same_v || + std::is_same_v || + std::is_same_v) { + m.funcs[0] = mul_mat_qY_K_q8_K_T; + m.funcs[1] = mul_mat_qY_K_q8_K_T; + m.funcs[2] = mul_mat_qY_K_q8_K_T; + m.funcs[3] = mul_mat_qY_K_q8_K_T; + m.funcs[4] = mul_mat_qY_K_q8_K_T; + m.funcs[5] = mul_mat_qY_K_q8_K_T; + m.funcs[6] = mul_mat_qY_K_q8_K_T; + m.funcs[7] = mul_mat_qY_K_q8_K_T; + } else { + m.funcs[0] = mul_mat_qX_K_q8_K_T; + m.funcs[1] = mul_mat_qX_K_q8_K_T; + m.funcs[2] = mul_mat_qX_K_q8_K_T; + m.funcs[3] = mul_mat_qX_K_q8_K_T; + m.funcs[4] = mul_mat_qX_K_q8_K_T; + m.funcs[5] = mul_mat_qX_K_q8_K_T; + m.funcs[6] = mul_mat_qX_K_q8_K_T; + m.funcs[7] = mul_mat_qX_K_q8_K_T; + } +#endif + } +} + +bool MulMat::set_mul_mat(int typeA, int ne00, MulMat& mm, int& row_size_q8, int) { + + row_size_q8 = ggml_row_size(GGML_TYPE_Q8_K, ne00); + + switch (typeA) { + case GGML_TYPE_Q2_K: + assert (ne00 % QK_K == 0); + MulMat::set_functions(mm); + break; + case GGML_TYPE_Q3_K: + assert (ne00 % QK_K == 0); + MulMat::set_functions(mm); + break; + case GGML_TYPE_Q4_K: + assert (ne00 % QK_K == 0); + MulMat::set_functions(mm); + break; + case GGML_TYPE_Q5_K: + assert (ne00 % QK_K == 0); + MulMat::set_functions(mm); + break; + case GGML_TYPE_Q6_K: + assert (ne00 % QK_K == 0); + MulMat::set_functions(mm); + break; + case GGML_TYPE_IQ4_XS: + assert (ne00 % QK_K == 0); + MulMat::set_functions(mm); + break; + case GGML_TYPE_Q4_0: + assert (ne00 % QK4_0 == 0); + MulMat::set_functions(mm); + row_size_q8 = ggml_row_size(GGML_TYPE_Q8_0, ne00); + break; + case GGML_TYPE_Q4_1: + assert (ne00 % QK4_1 == 0); + MulMat::set_functions(mm); + row_size_q8 = ggml_row_size(GGML_TYPE_Q8_1, ne00); + break; + case GGML_TYPE_Q5_0: + assert (ne00 % QK5_0 == 0); + MulMat::set_functions(mm); + row_size_q8 = ggml_row_size(GGML_TYPE_Q8_0, ne00); + break; + case GGML_TYPE_Q5_1: + assert (ne00 % QK5_1 == 0); + MulMat::set_functions(mm); + row_size_q8 = ggml_row_size(GGML_TYPE_Q8_1, ne00); + break; + + default: + return false; + } + + return true; +} + +} // namespace + + +#else // __aarch64__ + +namespace { + +template struct Q8 { + + constexpr static int nrc_y = nrc; + + Q8(const DataInfo& info) { + for (int iy = 0; iy < nrc_y; ++iy) y[iy] = (const block_q8 *)info.src1_row(iy); + } + + inline int8x16_t load_quants_16(int iy, int i, int j) const { return vld1q_s8(y[iy][i].qs + 16*j); } + inline int8x16x2_t load_quants(int iy, int i, int j) const { return vld1q_s8_x2(y[iy][i].qs + 32*j); } + inline int8x16x4_t load_quants_64(int iy, int i, int j) const { return vld1q_s8_x4(y[iy][i].qs + 64*j); } + inline int16x8x2_t load_bsums(int iy, int i) const { return vld1q_s16_x2(y[iy][i].bsums); } + inline int16x8_t load_bsums8(int iy, int i) const { + auto q8s = vld1q_s16_x2(y[iy][i].bsums); + return vpaddq_s16(q8s.val[0], q8s.val[1]); + } + inline float scale(int iy, int i) const { return y[iy][i].d; } + + const block_q8 * y[nrc_y]; +}; + +template +IQK_NOINLINE void mul_mat_qX_K_q8_K_T(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) { + assert(n % QK_K == 0); + 
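+    // Comment-only sketch of what this kernel accumulates, in the names used below
+    // (QK_K weights per super-block; block mins/bsums corrections, where the type
+    // has them, are folded in by the dequantizer):
+    //   for each row ix, column iy, super-block i:
+    //       acc[iy] += deq.d * q8.scale(iy, i) * sum_k q(i, k) * q8(iy, i, k)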
const int nb = n / QK_K; + + Q8 q8(info); + + Dequantizer deq(vx, bx, nrc_y); + + for (int ix = 0; ix < nrc_x; ++ix) { + + deq.new_row(ix); + + float32x4_t acc[nrc_y]; + for (int iy = 0; iy < nrc_y; ++iy) acc[iy] = vdupq_n_f32(0.f); + +//#pragma GCC unroll 4 + for (int i = 0; i < nb; ++i) { + + int32x4_t sumi[nrc_y]; + for (int iy = 0; iy < nrc_y; ++iy) sumi[iy] = vdupq_n_s32(0); + + if constexpr (nrc_y > 1 && Dequantizer::should_scale_quants()) { + deq.process_scales(i, q8, acc); + deq.prepare(i, 0); + deq.compute(q8, i, 0, sumi); + deq.prepare(i, 1); + deq.compute(q8, i, 1, sumi); + } else { + if constexpr (Dequantizer::num_blocks() == 8) { + auto scales = deq.new_block(i, q8, acc); + deq.prepare(i, 0); +#pragma GCC unroll 8 + for (int iy = 0; iy < nrc_y; ++iy) compute_8_blocks(deq.bits.b1, deq.bits.b2, q8, scales, iy, i, 0, sumi[iy]); + deq.prepare(i, 1); +#pragma GCC unroll 8 + for (int iy = 0; iy < nrc_y; ++iy) compute_8_blocks(deq.bits.b1, deq.bits.b2, q8, scales, iy, i, 1, sumi[iy]); + } + else if constexpr (Dequantizer::num_blocks() == 16) { + auto scales = deq.new_block(i, q8, acc); + deq.prepare(i, 0); +#pragma GCC unroll 8 + for (int iy = 0; iy < nrc_y; ++iy) compute_16_blocks(deq.bits.b1, deq.bits.b2, q8, scales, iy, i, 0, sumi[iy]); + deq.prepare(i, 1); +#pragma GCC unroll 8 + for (int iy = 0; iy < nrc_y; ++iy) compute_16_blocks(deq.bits.b1, deq.bits.b2, q8, scales, iy, i, 1, sumi[iy]); + } + else { + GGML_ASSERT(false); + } + } + +#pragma GCC unroll 8 + for (int iy = 0; iy < nrc_y; ++iy) { + acc[iy] = vmlaq_f32(acc[iy], vcvtq_f32_s32(sumi[iy]), vdupq_n_f32(deq.d*q8.scale(iy, i))); + } + } + +#pragma GCC unroll 8 + for (int iy = 0; iy < nrc_y; ++iy) { + info.store(ix, iy, vaddvq_f32(acc[iy])); + } + } +} +template +IQK_NOINLINE void mul_mat_qX_K_q8_K_IQ(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) { + assert(n % QK_K == 0); + const int nb = n / QK_K; + + Q8 q8(info); + + Dequantizer deq(vx, bx, nrc_y); + + for (int ix = 0; ix < nrc_x; ++ix) { + + deq.new_row(ix); + + float32x4_t acc[nrc_y]; + for (int iy = 0; iy < nrc_y; ++iy) acc[iy] = vdupq_n_f32(0.f); + + for (int i = 0; i < nb; ++i) { + + int32x4_t sumi[nrc_y]; + for (int iy = 0; iy < nrc_y; ++iy) sumi[iy] = vdupq_n_s32(0); + + if constexpr (Dequantizer::num_blocks() == 8) { + auto scales = deq.new_block(i); + deq.prepare(i, 0); +#pragma GCC unroll 8 + for (int iy = 0; iy < nrc_y; ++iy) compute_8_blocks(deq.bits.b1, deq.bits.b2, q8, scales, iy, i, 0, sumi[iy]); + deq.prepare(i, 1); +#pragma GCC unroll 8 + for (int iy = 0; iy < nrc_y; ++iy) compute_8_blocks(deq.bits.b1, deq.bits.b2, q8, scales, iy, i, 1, sumi[iy]); + } + else if constexpr (Dequantizer::num_blocks() == 16) { + auto scales = deq.new_block(i); + deq.prepare(i, 0); +#pragma GCC unroll 8 + for (int iy = 0; iy < nrc_y; ++iy) compute_16_blocks(deq.bits.b1, deq.bits.b2, q8, scales, iy, i, 0, sumi[iy]); + deq.prepare(i, 1); +#pragma GCC unroll 8 + for (int iy = 0; iy < nrc_y; ++iy) compute_16_blocks(deq.bits.b1, deq.bits.b2, q8, scales, iy, i, 1, sumi[iy]); + } + else { + GGML_ASSERT(false); + } +#pragma GCC unroll 8 + for (int iy = 0; iy < nrc_y; ++iy) { + acc[iy] = vmlaq_f32(acc[iy], vcvtq_f32_s32(sumi[iy]), vdupq_n_f32(deq.d*q8.scale(iy, i))); + } + } +#pragma GCC unroll 8 + for (int iy = 0; iy < nrc_y; ++iy) { + info.store(ix, iy, vaddvq_f32(acc[iy])); + } + } +} + +template +IQK_ALWAYS_INLINE void compute_8_blocks(const uint8x16x4_t& qx_1, const uint8x16x4_t& qx_2, const Q8& q8, + const int32x4x2_t& scales, int iy, int i, int j, 
int32x4_t& sumi) {
+    auto mzero = vdupq_n_s32(0);
+    const int8x16_t * qs_1 = (const int8x16_t *)qx_1.val;
+    const int8x16_t * qs_2 = (const int8x16_t *)qx_2.val;
+
+    auto q8b_1 = q8.load_quants(iy, i, 4*j+0);
+    auto p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, qs_1[0], q8b_1.val[0]), qs_1[1], q8b_1.val[1]); // block 1
+    auto q8b_2 = q8.load_quants(iy, i, 4*j+1);
+    auto p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, qs_1[2], q8b_2.val[0]), qs_1[3], q8b_2.val[1]); // block 2
+    auto p12 = vpaddq_s32(p1, p2);
+
+    auto q8b_3 = q8.load_quants(iy, i, 4*j+2);
+    auto p3 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, qs_2[0], q8b_3.val[0]), qs_2[1], q8b_3.val[1]); // block 3
+    auto q8b_4 = q8.load_quants(iy, i, 4*j+3);
+    auto p4 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, qs_2[2], q8b_4.val[0]), qs_2[3], q8b_4.val[1]); // block 4
+    auto p34 = vpaddq_s32(p3, p4);
+
+    auto pall = vpaddq_s32(p12, p34);
+    sumi = vmlaq_s32(sumi, scales.val[j], pall);
+}
+template
+IQK_ALWAYS_INLINE void compute_8_blocks(const int8x16_t * qx, const Q8& q8,
+        const int32x4_t& scales, int iy, int i, int j, int32x4_t& sumi) {
+    auto mzero = vdupq_n_s32(0);
+
+    auto q8b_1 = q8.load_quants(iy, i, 4*j+0);
+    auto p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, qx[0], q8b_1.val[0]), qx[1], q8b_1.val[1]); // block 1
+    auto q8b_2 = q8.load_quants(iy, i, 4*j+1);
+    auto p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, qx[2], q8b_2.val[0]), qx[3], q8b_2.val[1]); // block 2
+    auto p12 = vpaddq_s32(p1, p2);
+
+    auto q8b_3 = q8.load_quants(iy, i, 4*j+2);
+    auto p3 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, qx[4], q8b_3.val[0]), qx[5], q8b_3.val[1]); // block 3
+    auto q8b_4 = q8.load_quants(iy, i, 4*j+3);
+    auto p4 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, qx[6], q8b_4.val[0]), qx[7], q8b_4.val[1]); // block 4
+    auto p34 = vpaddq_s32(p3, p4);
+
+    auto pall = vpaddq_s32(p12, p34);
+    sumi = vmlaq_s32(sumi, scales, pall);
+}
+
+template
+IQK_ALWAYS_INLINE void compute_16_blocks(const uint8x16x4_t& qx_1, const uint8x16x4_t& qx_2, const Q8& q8,
+        const int32x4x4_t& scales, int iy, int i, int j, int32x4_t& sumi) {
+
+    auto mzero = vdupq_n_s32(0);
+    auto q8b_1 = q8.load_quants(iy, i, 4*j+0);
+    auto p1 = vpaddq_s32(ggml_vdotq_s32(mzero, vreinterpretq_s8_u8(qx_1.val[0]), q8b_1.val[0]),
+            ggml_vdotq_s32(mzero, vreinterpretq_s8_u8(qx_1.val[1]), q8b_1.val[1])); // blocks 0, 0, 1, 1
+    auto q8b_2 = q8.load_quants(iy, i, 4*j+1);
+    auto p2 = vpaddq_s32(ggml_vdotq_s32(mzero, vreinterpretq_s8_u8(qx_1.val[2]), q8b_2.val[0]),
+            ggml_vdotq_s32(mzero, vreinterpretq_s8_u8(qx_1.val[3]), q8b_2.val[1])); // blocks 2, 2, 3, 3
+    auto p12 = vpaddq_s32(p1, p2); // blocks 0, 1, 2, 3
+    sumi = vmlaq_s32(sumi, scales.val[2*j+0], p12);
+
+    auto q8b_3 = q8.load_quants(iy, i, 4*j+2);
+    auto p3 = vpaddq_s32(ggml_vdotq_s32(mzero, vreinterpretq_s8_u8(qx_2.val[0]), q8b_3.val[0]),
+            ggml_vdotq_s32(mzero, vreinterpretq_s8_u8(qx_2.val[1]), q8b_3.val[1])); // blocks 4, 4, 5, 5
+    auto q8b_4 = q8.load_quants(iy, i, 4*j+3);
+    auto p4 = vpaddq_s32(ggml_vdotq_s32(mzero, vreinterpretq_s8_u8(qx_2.val[2]), q8b_4.val[0]),
+            ggml_vdotq_s32(mzero, vreinterpretq_s8_u8(qx_2.val[3]), q8b_4.val[1])); // blocks 6, 6, 7, 7
+    auto p34 = vpaddq_s32(p3, p4); // blocks 4, 5, 6, 7
+    sumi = vmlaq_s32(sumi, scales.val[2*j+1], p34);
+}
+
+template
+inline void accum_mins_8(const int16x8_t& mins, const Q8& q8, float32x4_t * acc, int i, float c) {
+    for (int iy = 0; iy < Q8::nrc_y; ++iy) {
+        auto q8s = q8.load_bsums8(iy, i);
+        int32x4_t b1 = vmull_s16(vget_low_s16(mins), vget_low_s16(q8s));
+        int32x4_t b2 = vmull_s16(vget_high_s16(mins),
vget_high_s16(q8s)); + float32x4_t prod = vcvtq_f32_s32(vaddq_s32(b1, b2)); + acc[iy] = vmlaq_f32(acc[iy], prod, vdupq_n_f32(c*q8.scale(iy, i))); + } +} +template +inline void accum_mins_16(const int16x8x2_t& mins, const Q8& q8, float32x4_t * acc, int i, float c) { + for (int iy = 0; iy < Q8::nrc_y; ++iy) { + auto q8s = q8.load_bsums(iy, i); + int32x4_t b1 = vmull_s16(vget_low_s16 (mins.val[0]), vget_low_s16 (q8s.val[0])); + int32x4_t b2 = vmull_s16(vget_high_s16(mins.val[0]), vget_high_s16(q8s.val[0])); + int32x4_t b3 = vmull_s16(vget_low_s16 (mins.val[1]), vget_low_s16 (q8s.val[1])); + int32x4_t b4 = vmull_s16(vget_high_s16(mins.val[1]), vget_high_s16(q8s.val[1])); + float32x4_t prod = vcvtq_f32_s32(vaddq_s32(vaddq_s32(b1, b2), vaddq_s32(b3, b4))); + acc[iy] = vmlaq_f32(acc[iy], prod, vdupq_n_f32(c*q8.scale(iy, i))); + } +} + +struct Scales8 { + uint32_t utmp[4]; + const uint8_t * sc8 = (const uint8_t *)utmp; + template + inline int32x4x2_t process_scales_mins(const Qx& x, const Q8& q8, int i, float32x4_t * acc) { + make_q4_scales(x.scales, utmp); + int16x8_t mins = vmovl_s8(vld1_s8((const int8_t *)sc8 + 8)); + accum_mins_8(mins, q8, acc, i, -GGML_FP16_TO_FP32(x.dmin)); + + uint8x8_t scales8 = vld1_u8(sc8); + uint16x8_t scales16 = vmovl_u8(scales8); + int32x4x2_t scales = {vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales16))), + vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales16)))}; + return scales; + } +}; + +struct Q4bits { + const uint8x16_t m4b = vdupq_n_u8(0xf); + uint8x16x4_t b1, b2; + inline void prepare4(uint8x16x4_t& b, const uint8x16_t * val) const { + b.val[0] = vandq_u8(val[0], m4b); + b.val[2] = vshrq_n_u8(val[0], 4); + b.val[1] = vandq_u8(val[1], m4b); + b.val[3] = vshrq_n_u8(val[1], 4); + } + inline void prepare4_16(uint8x16x4_t& b, const uint8x16_t * val) const { + b.val[0] = vandq_u8(val[0], m4b); + b.val[1] = vshrq_n_u8(val[0], 4); + b.val[2] = vandq_u8(val[1], m4b); + b.val[3] = vshrq_n_u8(val[1], 4); + } + inline void prepare(const uint8_t * qs) { + auto q4bits = vld1q_u8_x2(qs); + prepare4(b1, q4bits.val); + q4bits = vld1q_u8_x2(qs+32); + prepare4(b2, q4bits.val); + } + inline void prepare_v2(const uint8_t * qs) { + auto q4bits = vld1q_u8_x4(qs); + prepare4(b1, q4bits.val+0); + prepare4(b2, q4bits.val+2); + } + inline void prepare64(const uint8_t * qs) { + auto q4bits = vld1q_u8_x4(qs); + b1.val[0] = vandq_u8(q4bits.val[0], m4b); + b1.val[1] = vandq_u8(q4bits.val[1], m4b); + b1.val[2] = vandq_u8(q4bits.val[2], m4b); + b1.val[3] = vandq_u8(q4bits.val[3], m4b); + b2.val[0] = vshrq_n_u8(q4bits.val[0], 4); + b2.val[1] = vshrq_n_u8(q4bits.val[1], 4); + b2.val[2] = vshrq_n_u8(q4bits.val[2], 4); + b2.val[3] = vshrq_n_u8(q4bits.val[3], 4); + } + inline void prepare16(const uint8_t * qs) { + auto q4bits = vld1q_u8_x2(qs); + prepare4_16(b1, q4bits.val); + q4bits = vld1q_u8_x2(qs+32); + prepare4_16(b2, q4bits.val); + } + inline void prepare16_v2(const uint8_t * qs) { + auto q4bits = vld1q_u8_x4(qs); + prepare4_16(b1, q4bits.val+0); + prepare4_16(b2, q4bits.val+2); + } +}; + +struct Q2bits { + const uint8x16_t m4b = vdupq_n_u8(0x03); + uint8x16x4_t b1, b2; + inline void prepare(const uint8_t * qs) { + auto q2bits = vld1q_u8_x2(qs); + b1.val[0] = vandq_u8(q2bits.val[0], m4b); + b1.val[1] = vandq_u8(q2bits.val[1], m4b); + + q2bits.val[0] = vshrq_n_u8(q2bits.val[0], 2); + q2bits.val[1] = vshrq_n_u8(q2bits.val[1], 2); + b1.val[2] = vandq_u8(q2bits.val[0], m4b); + b1.val[3] = vandq_u8(q2bits.val[1], m4b); + + q2bits.val[0] = vshrq_n_u8(q2bits.val[0], 2); + q2bits.val[1] = 
vshrq_n_u8(q2bits.val[1], 2); + b2.val[0] = vandq_u8(q2bits.val[0], m4b); + b2.val[1] = vandq_u8(q2bits.val[1], m4b); + + q2bits.val[0] = vshrq_n_u8(q2bits.val[0], 2); + q2bits.val[1] = vshrq_n_u8(q2bits.val[1], 2); + b2.val[2] = vandq_u8(q2bits.val[0], m4b); + b2.val[3] = vandq_u8(q2bits.val[1], m4b); + } +}; + +template +struct BaseDequantizer { + BaseDequantizer(const void * vx, size_t bx, int nrc) : vx(vx), x(nullptr), bx(bx), nrc(nrc) {} + inline void new_row(int ix) { x = (const block_q *)((const char *)vx + ix*bx); } + const void * vx; + const block_q * x; + const size_t bx; + const int nrc; +}; + +struct DequantizerQ4K final : public BaseDequantizer { + DequantizerQ4K(const void * vx, size_t bx, int nrc) : BaseDequantizer(vx, bx, nrc) {} + + constexpr static int num_blocks() { return 8; } + constexpr static bool should_scale_quants() { return false; } + + template + inline int32x4x2_t new_block(int i, const Q8& q8, float32x4_t * acc) { + d = GGML_FP16_TO_FP32(x[i].d); + return s8.process_scales_mins(x[i], q8, i, acc); + } + inline void prepare(int i, int j) { + if (nrc == 1) bits.prepare_v2(x[i].qs+64*j); + else bits.prepare(x[i].qs+64*j); + } + + Q4bits bits; + Scales8 s8; + + float d; +}; + +struct HighBit5 { + const uint8x16_t mhb = vdupq_n_u8(0x10); + uint8x16x2_t bits; + inline void apply(uint8x16x4_t& b1, uint8x16x4_t& b2, bool do_shift) { + b1.val[0] = vorrq_u8(b1.val[0], vandq_u8(vshlq_n_u8(bits.val[0], 4), mhb)); + b1.val[1] = vorrq_u8(b1.val[1], vandq_u8(vshlq_n_u8(bits.val[1], 4), mhb)); + b1.val[2] = vorrq_u8(b1.val[2], vandq_u8(vshlq_n_u8(bits.val[0], 3), mhb)); + b1.val[3] = vorrq_u8(b1.val[3], vandq_u8(vshlq_n_u8(bits.val[1], 3), mhb)); + + b2.val[0] = vorrq_u8(b2.val[0], vandq_u8(vshlq_n_u8(bits.val[0], 2), mhb)); + b2.val[1] = vorrq_u8(b2.val[1], vandq_u8(vshlq_n_u8(bits.val[1], 2), mhb)); + b2.val[2] = vorrq_u8(b2.val[2], vandq_u8(vshlq_n_u8(bits.val[0], 1), mhb)); + b2.val[3] = vorrq_u8(b2.val[3], vandq_u8(vshlq_n_u8(bits.val[1], 1), mhb)); + + if (do_shift) { + bits.val[0] = vshrq_n_u8(bits.val[0], 4); + bits.val[1] = vshrq_n_u8(bits.val[1], 4); + } + } +}; + +struct HighBit3 { + const uint8x16_t mhb = vdupq_n_u8(0x04); + uint8x16x2_t bits; + inline void apply(uint8x16x4_t& b1, uint8x16x4_t& b2, bool do_shift) { + b1.val[0] = vorrq_u8(b1.val[0], vandq_u8(vshlq_n_u8(bits.val[0], 2), mhb)); + b1.val[1] = vorrq_u8(b1.val[1], vandq_u8(vshlq_n_u8(bits.val[1], 2), mhb)); + b1.val[2] = vorrq_u8(b1.val[2], vandq_u8(vshlq_n_u8(bits.val[0], 1), mhb)); + b1.val[3] = vorrq_u8(b1.val[3], vandq_u8(vshlq_n_u8(bits.val[1], 1), mhb)); + + b2.val[0] = vorrq_u8(b2.val[0], vandq_u8(bits.val[0], mhb)); + b2.val[1] = vorrq_u8(b2.val[1], vandq_u8(bits.val[1], mhb)); + b2.val[2] = vorrq_u8(b2.val[2], vandq_u8(vshrq_n_u8(bits.val[0], 1), mhb)); + b2.val[3] = vorrq_u8(b2.val[3], vandq_u8(vshrq_n_u8(bits.val[1], 1), mhb)); + + if (do_shift) { + bits.val[0] = vshrq_n_u8(bits.val[0], 4); + bits.val[1] = vshrq_n_u8(bits.val[1], 4); + } + } +}; + +struct DequantizerQ5K final : public BaseDequantizer { + DequantizerQ5K(const void * vx, size_t bx, int nrc) : BaseDequantizer(vx, bx, nrc) {} + + constexpr static int num_blocks() { return 8; } + constexpr static bool should_scale_quants() { return false; } + + template + inline int32x4x2_t new_block(int i, const Q8& q8, float32x4_t * acc) { + d = GGML_FP16_TO_FP32(x[i].d); + h.bits = vld1q_u8_x2(x[i].qh); + return s8.process_scales_mins(x[i], q8, i, acc); + } + inline void prepare(int i, int j) { + bits.prepare(x[i].qs+64*j); + h.apply(bits.b1, 
bits.b2, j == 0); + } + + Q4bits bits; + HighBit5 h; + Scales8 s8; + + uint8x16x2_t hbits; + + float d; +}; + +inline int32x4x4_t make_wider(const int16x8x2_t& scales16) { + int32x4x4_t scales = { + vmovl_s16(vget_low_s16 (scales16.val[0])), + vmovl_s16(vget_high_s16(scales16.val[0])), + vmovl_s16(vget_low_s16 (scales16.val[1])), + vmovl_s16(vget_high_s16(scales16.val[1])), + }; + return scales; +} + +template +inline int32x4x4_t process_scales_mins_16(const int8x16_t& scales8, const Q8& q8, float32x4_t * acc, int i, float c) { + int16x8x2_t scales16; + scales16.val[0] = vmovl_s8(vget_low_s8(scales8)); + scales16.val[1] = vmovl_s8(vget_high_s8(scales8)); + accum_mins_16(scales16, q8, acc, i, c); + return make_wider(scales16); +} + +struct DequantizerQ6K final : public BaseDequantizer { + DequantizerQ6K(const void * vx, size_t bx, int nrc) : BaseDequantizer(vx, bx, nrc) {} + + constexpr static int num_blocks() { return 16; } + constexpr static bool should_scale_quants() { return false; } + + template + inline int32x4x4_t new_block(int i, const Q8& q8, float32x4_t * acc) { + d = GGML_FP16_TO_FP32(x[i].d); + return process_scales_mins_16(vld1q_s8(x[i].scales), q8, acc, i, -32.f*d); + } + inline void prepare(int i, int j) { + + auto hbits = vld1q_u8_x2(x[i].qh + 32*j); + + bits.prepare64(x[i].ql+64*j); + bits.b1.val[0] = vorrq_u8(bits.b1.val[0], vandq_u8(vshlq_n_u8(hbits.val[0], 4), mhb)); + bits.b1.val[1] = vorrq_u8(bits.b1.val[1], vandq_u8(vshlq_n_u8(hbits.val[1], 4), mhb)); + bits.b1.val[2] = vorrq_u8(bits.b1.val[2], vandq_u8(vshlq_n_u8(hbits.val[0], 2), mhb)); + bits.b1.val[3] = vorrq_u8(bits.b1.val[3], vandq_u8(vshlq_n_u8(hbits.val[1], 2), mhb)); + + bits.b2.val[0] = vorrq_u8(bits.b2.val[0], vandq_u8(hbits.val[0], mhb)); + bits.b2.val[1] = vorrq_u8(bits.b2.val[1], vandq_u8(hbits.val[1], mhb)); + bits.b2.val[2] = vorrq_u8(bits.b2.val[2], vandq_u8(vshrq_n_u8(hbits.val[0], 2), mhb)); + bits.b2.val[3] = vorrq_u8(bits.b2.val[3], vandq_u8(vshrq_n_u8(hbits.val[1], 2), mhb)); + + } + + Q4bits bits; + + const uint8x16_t mhb = vdupq_n_u8(0x30); + + float d; +}; + +struct DequantizerQ3K final : public BaseDequantizer { + DequantizerQ3K(const void * vx, size_t bx, int nrc) : BaseDequantizer(vx, bx, nrc) {} + + constexpr static int num_blocks() { return 16; } + constexpr static bool should_scale_quants() { return false; } + + template + inline int32x4x4_t new_block(int i, const Q8& q8, float32x4_t * acc) { + d = GGML_FP16_TO_FP32(x[i].d); + h.bits = vld1q_u8_x2(x[i].hmask); + const uint16_t * sc16 = (const uint16_t *)x[i].scales; + uint32_t aux0 = sc16[0] | (sc16[1] << 16); + uint32_t aux1 = sc16[2] | (sc16[3] << 16); + uint32_t aux2 = sc16[4] | (sc16[5] << 16); + aux32[0] = (aux0 & 0x0f0f0f0f) | ((aux2 << 4) & 0x30303030); + aux32[1] = (aux1 & 0x0f0f0f0f) | ((aux2 << 2) & 0x30303030); + aux32[2] = ((aux0 >> 4) & 0x0f0f0f0f) | ((aux2 >> 0) & 0x30303030); + aux32[3] = ((aux1 >> 4) & 0x0f0f0f0f) | ((aux2 >> 2) & 0x30303030); + return process_scales_mins_16(vaddq_s8(vld1q_s8((const int8_t *)aux32), vdupq_n_s8(-32)), q8, acc, i, -4.f*d); + } + + inline void prepare(int i, int j) { + bits.prepare(x[i].qs+32*j); + h.apply(bits.b1, bits.b2, j == 0); + } + + uint32_t aux32[4]; + + Q2bits bits; + + HighBit3 h; + + float d; +}; + +struct DequantizerQ2K final : public BaseDequantizer { + DequantizerQ2K(const void * vx, size_t bx, int nrc) : BaseDequantizer(vx, bx, nrc) {} + + constexpr static int num_blocks() { return 16; } + constexpr static bool should_scale_quants() { return true; } + + template + inline void 
process_scales(int i, const Q8& q8, float32x4_t * acc) { + d = GGML_FP16_TO_FP32(x[i].d); + auto scales_and_mins = vld1q_u8(x[i].scales); + auto mins8 = vreinterpretq_s8_u8(vshrq_n_u8(scales_and_mins, 4)); + int16x8x2_t scales16; + scales16.val[0] = vmovl_s8(vget_low_s8(mins8)); + scales16.val[1] = vmovl_s8(vget_high_s8(mins8)); + accum_mins_16(scales16, q8, acc, i, -GGML_FP16_TO_FP32(x[i].dmin)); + + scales8 = vandq_u8(scales_and_mins, vdupq_n_u8(0xf)); + } + + template + inline int32x4x4_t new_block(int i, const Q8& q8, float32x4_t * acc) { + process_scales(i, q8, acc); + int16x8x2_t scales16; + scales16.val[0] = vmovl_s8(vget_low_s8(vreinterpretq_s8_u8(scales8))); + scales16.val[1] = vmovl_s8(vget_high_s8(vreinterpretq_s8_u8(scales8))); + return make_wider(scales16); + } + + template + inline void compute(const Q8& q8, int i, int j, int32x4_t * sumi) { + auto m1 = vdupq_n_u8(1); + auto shuffle = vdupq_n_u8(8*j); + bits.b1.val[0] = vmulq_u8(bits.b1.val[0], vqtbl1q_u8(scales8, shuffle)); shuffle = vaddq_u8(shuffle, m1); + bits.b1.val[1] = vmulq_u8(bits.b1.val[1], vqtbl1q_u8(scales8, shuffle)); shuffle = vaddq_u8(shuffle, m1); + bits.b1.val[2] = vmulq_u8(bits.b1.val[2], vqtbl1q_u8(scales8, shuffle)); shuffle = vaddq_u8(shuffle, m1); + bits.b1.val[3] = vmulq_u8(bits.b1.val[3], vqtbl1q_u8(scales8, shuffle)); shuffle = vaddq_u8(shuffle, m1); + bits.b2.val[0] = vmulq_u8(bits.b2.val[0], vqtbl1q_u8(scales8, shuffle)); shuffle = vaddq_u8(shuffle, m1); + bits.b2.val[1] = vmulq_u8(bits.b2.val[1], vqtbl1q_u8(scales8, shuffle)); shuffle = vaddq_u8(shuffle, m1); + bits.b2.val[2] = vmulq_u8(bits.b2.val[2], vqtbl1q_u8(scales8, shuffle)); shuffle = vaddq_u8(shuffle, m1); + bits.b2.val[3] = vmulq_u8(bits.b2.val[3], vqtbl1q_u8(scales8, shuffle)); shuffle = vaddq_u8(shuffle, m1); + for (int iy = 0; iy < Q8::nrc_y; ++iy) { + auto q8b_1 = q8.load_quants(iy, i, 4*j+0); + sumi[iy] = ggml_vdotq_s32(ggml_vdotq_s32(sumi[iy], vreinterpretq_s8_u8(bits.b1.val[0]), q8b_1.val[0]), + vreinterpretq_s8_u8(bits.b1.val[1]), q8b_1.val[1]); + + auto q8b_2 = q8.load_quants(iy, i, 4*j+1); + sumi[iy] = ggml_vdotq_s32(ggml_vdotq_s32(sumi[iy], vreinterpretq_s8_u8(bits.b1.val[2]), q8b_2.val[0]), + vreinterpretq_s8_u8(bits.b1.val[3]), q8b_2.val[1]); + + auto q8b_3 = q8.load_quants(iy, i, 4*j+2); + sumi[iy] = ggml_vdotq_s32(ggml_vdotq_s32(sumi[iy], vreinterpretq_s8_u8(bits.b2.val[0]), q8b_3.val[0]), + vreinterpretq_s8_u8(bits.b2.val[1]), q8b_3.val[1]); + + auto q8b_4 = q8.load_quants(iy, i, 4*j+3); + sumi[iy] = ggml_vdotq_s32(ggml_vdotq_s32(sumi[iy], vreinterpretq_s8_u8(bits.b2.val[2]), q8b_4.val[0]), + vreinterpretq_s8_u8(bits.b2.val[3]), q8b_4.val[1]); + } + } + + inline void prepare(int i, int j) { + bits.prepare(x[i].qs+32*j); + } + + uint32_t aux32[4]; + + uint8x16_t scales8; + + Q2bits bits; + + float d; +}; + +// ============================= i-quants + +struct DequantizerIQ4XS final : public BaseDequantizer { + + static int8x16_t load_values() { + static const int8_t iq4nl_values[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113}; + return vld1q_s8(iq4nl_values); + } + + DequantizerIQ4XS(const void * vx, size_t bx, int nrc) : BaseDequantizer(vx, bx, nrc), values(load_values()) {} + + constexpr static int num_blocks() { return 8; } + constexpr static bool should_scale_quants() { return false; } + + inline void new_row(int ix) { x = (const block_iq4_xs *)((const char *)vx + bx*ix); } + + template + inline int32x4x2_t new_block(int i, const Q8& q8, float32x4_t * acc) { + (void)q8; + (void)acc; + d = 
GGML_FP16_TO_FP32(x[i].d); + const uint16_t scales_h = x[i].scales_h; + const uint16_t * scales_l = (const uint16_t *)x[i].scales_l; + aux32[0] = scales_l[0] | (scales_l[1] << 16); + aux32[1] = aux32[0] >> 4; + // scl is ordered as 0, 2, 4, 6, 1, 3, 5, 7 + uint8x8_t scl8 = vand_u8(vld1_u8((const uint8_t *)aux32), vdup_n_u8(0xf)); + uint16_t * aux16 = (uint16_t *)aux32; + aux16[0] = scales_h << 4; aux16[1] = scales_h << 2; aux16[2] = scales_h; aux16[3] = scales_h >> 2; + // sch is ordered as 0, 4, 1, 5, 2, 6, 3, 7 + uint8x8_t sch8 = vand_u8(vld1_u8((const uint8_t *)aux16), vdup_n_u8(0x30)); + int8x8_t scales8 = vadd_s8(vreinterpret_s8_u8(vorr_u8(scl8, vtbl1_u8(sch8, vreinterpret_u8_u32(hshuff)))), vdup_n_s8(-32)); + // shuffle 0, 2, 4, 6, 1, 3, 5, 7 -> 0, 1, 2, 3, 4, 5, 6, 7 + scales8 = vtbl1_s8(scales8, vreinterpret_s8_u32(hshuff)); + int16x8_t scales16 = vmovl_s8(scales8); + int32x4x2_t scales = {vmovl_s16(vget_low_s16(scales16)), vmovl_s16(vget_high_s16(scales16))}; + return scales; + } + inline void prepare(int i, int j) { + bits.prepare16(x[i].qs+64*j); + for (int k = 0; k < 4; ++k) { + bits.b1.val[k] = vreinterpretq_u8_s8(vqtbl1q_s8(values, bits.b1.val[k])); + bits.b2.val[k] = vreinterpretq_u8_s8(vqtbl1q_s8(values, bits.b2.val[k])); + } + } + + Q4bits bits; + const int8x16_t values; + uint32_t aux32[2]; + + constexpr static uint32x2_t hshuff = {0x05010400, 0x07030602}; + + float d; +}; + +struct SimpleBits { + uint8x16x4_t b1; + uint8x16x4_t b2; +}; + +IQK_ALWAYS_INLINE int32x4x2_t prepare_scales_8(const uint32x4_t& v1, const uint32x4_t& v2) { + int32x4x2_t scales; + auto one = vdupq_n_u32(1); + scales.val[0] = vreinterpretq_s32_u32(vsliq_n_u32(one, vshrq_n_u32(v1, 28), 1)); + scales.val[1] = vreinterpretq_s32_u32(vsliq_n_u32(one, vshrq_n_u32(v2, 28), 1)); + return scales; +} + +inline void apply_signs_2(uint8x16_t * b, const uint64_t * signs, uint32_t sidx) { + auto s1 = vcombine_s8(vld1_s8((const int8_t *)(signs + ((sidx >> 0) & 127))), vld1_s8((const int8_t *)(signs + ((sidx >> 7) & 127)))); + auto s2 = vcombine_s8(vld1_s8((const int8_t *)(signs + ((sidx >>14) & 127))), vld1_s8((const int8_t *)(signs + ((sidx >>21) & 127)))); + b[0] = vreinterpretq_u8_s8(vmulq_s8(vreinterpretq_s8_u8(b[0]), s1)); + b[1] = vreinterpretq_u8_s8(vmulq_s8(vreinterpretq_s8_u8(b[1]), s2)); +} + +IQK_ALWAYS_INLINE int32x4_t prepare_scales_8(const uint32x4_t& v1) { + return vreinterpretq_s32_u32(vsliq_n_u32(vdupq_n_u32(1), vshrq_n_u32(v1, 28), 1)); +} + +struct DequantizerIQ2XXS final : public BaseDequantizer { + DequantizerIQ2XXS(const void * vx, size_t bx, int nrc) : BaseDequantizer(vx, bx, nrc) {} + + IQK_ALWAYS_INLINE float new_block(int i) const { return 0.125f * GGML_FP16_TO_FP32(x[i].d); } + + inline int32x4_t unpack(int i, int j, uint8x16_t * q) const { + auto data = vld1q_u32_x2((const uint32_t *)(x[i].qs + 16*j)); + prepare_all(data, q); + return prepare_scales_8(vuzp2q_u32(data.val[0], data.val[1])); + } + +private: + + static inline void prepare2(uint8x16_t * b, const uint32_t * bits, const uint64_t * signs) { + const uint8_t * idx = (const uint8_t *)bits; + b[0] = vreinterpretq_u8_u64(uint64x2_t{iq2xxs_grid[idx[0]], iq2xxs_grid[idx[1]]}); + b[1] = vreinterpretq_u8_u64(uint64x2_t{iq2xxs_grid[idx[2]], iq2xxs_grid[idx[3]]}); + apply_signs_2(b, signs, bits[1]); + } + + inline static void prepare_all(const uint32x4x2_t& data, uint8x16_t * quants) { + const uint32_t * q2 = (const uint32_t *)data.val; + prepare2(quants+0, q2+0, keven_signs); + prepare2(quants+2, q2+2, keven_signs); + prepare2(quants+4, 
q2+4, keven_signs); + prepare2(quants+6, q2+6, keven_signs); + } +}; + +inline int32x4x4_t prepare_4bit_scales16(const uint8_t * sc) { + auto aux = vld1_u8(sc); + auto scales_l = vand_u8(aux, vdup_n_u8(0xf)); + auto scales_h = vshr_n_u8(aux, 4); + auto aux1 = vcombine_u8(vzip1_u8(scales_l, scales_h), vzip2_u8(scales_l, scales_h)); + + auto scales8 = vreinterpretq_s8_u8(vorrq_u8(vshlq_n_u8(aux1, 1), vdupq_n_u8(1))); + int16x8x2_t scales16 = { vmovl_s8(vget_low_s8(scales8)), vmovl_s8(vget_high_s8(scales8)) }; + return make_wider(scales16); +} + +struct DequantizerIQ2XS final : public BaseDequantizer { + DequantizerIQ2XS(const void * vx, size_t bx, int nrc) : BaseDequantizer(vx, bx, nrc) {} + + constexpr static int num_blocks() { return 16; } + constexpr static bool should_scale_quants() { return false; } + + SimpleBits bits; + float d; + + inline int32x4x4_t new_block(int i) { + d = 0.125f * GGML_FP16_TO_FP32(x[i].d); + prepare_internal(i, 0); + return prepare_4bit_scales16(x[i].scales); + } + + inline void prepare(int i, int j) { + if (j == 1) prepare_internal(i, 1); + } + +private: + + static void make2(const uint16_t * qs, uint8x16_t * b) { + auto v1 = vcombine_s8(vld1_s8((const int8_t *)(iq2xs_grid + (qs[0] & 511))), vld1_s8((const int8_t *)(iq2xs_grid + (qs[1] & 511)))); + auto v2 = vcombine_s8(vld1_s8((const int8_t *)(iq2xs_grid + (qs[2] & 511))), vld1_s8((const int8_t *)(iq2xs_grid + (qs[3] & 511)))); + auto s1 = vcombine_s8(vld1_s8((const int8_t *)(keven_signs + (qs[0] >> 9))), vld1_s8((const int8_t *)(keven_signs + (qs[1] >> 9)))); + auto s2 = vcombine_s8(vld1_s8((const int8_t *)(keven_signs + (qs[2] >> 9))), vld1_s8((const int8_t *)(keven_signs + (qs[3] >> 9)))); + b[0] = vreinterpretq_u8_s8(vmulq_s8(v1, s1)); + b[1] = vreinterpretq_u8_s8(vmulq_s8(v2, s2)); + } + + inline static void make4(const uint16_t * qs, uint8x16_t * b) { + make2(qs + 0, b + 0); + make2(qs + 4, b + 2); + } + + IQK_ALWAYS_INLINE void prepare_internal(int i, int j) { + make4(x[i].qs + 16*j + 0, bits.b1.val); + make4(x[i].qs + 16*j + 8, bits.b2.val); + } + +}; + +// So, I hate to include this table, but with the GCC 12.3 compiler +// bundled in the Cosmopolitan tools, loading the unpacked sign bytes +// from this table using the packed 8 sign bits as index is faster than +// using the standard trick of vceqq_u8(vandq_u8(bits, mask), mask) to +// expand the bits to bytes. 
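+// Byte k of kall_signs[s] is 0x01 (+1) if bit k of s is clear and 0xff (-1) if it
+// is set, so one packed sign byte indexes straight to its 8 unpacked sign bytes.
+// Comment-only sketch of a generator that reproduces the table:
+//   for (int s = 0; s < 256; ++s)
+//       for (int k = 0; k < 8; ++k)
+//           ((uint8_t *)kall_signs)[8*s + k] = ((s >> k) & 1) ? 0xff : 0x01;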
+static const uint64_t kall_signs[256] = { + 0x0101010101010101, 0x01010101010101ff, 0x010101010101ff01, 0x010101010101ffff, + 0x0101010101ff0101, 0x0101010101ff01ff, 0x0101010101ffff01, 0x0101010101ffffff, + 0x01010101ff010101, 0x01010101ff0101ff, 0x01010101ff01ff01, 0x01010101ff01ffff, + 0x01010101ffff0101, 0x01010101ffff01ff, 0x01010101ffffff01, 0x01010101ffffffff, + 0x010101ff01010101, 0x010101ff010101ff, 0x010101ff0101ff01, 0x010101ff0101ffff, + 0x010101ff01ff0101, 0x010101ff01ff01ff, 0x010101ff01ffff01, 0x010101ff01ffffff, + 0x010101ffff010101, 0x010101ffff0101ff, 0x010101ffff01ff01, 0x010101ffff01ffff, + 0x010101ffffff0101, 0x010101ffffff01ff, 0x010101ffffffff01, 0x010101ffffffffff, + 0x0101ff0101010101, 0x0101ff01010101ff, 0x0101ff010101ff01, 0x0101ff010101ffff, + 0x0101ff0101ff0101, 0x0101ff0101ff01ff, 0x0101ff0101ffff01, 0x0101ff0101ffffff, + 0x0101ff01ff010101, 0x0101ff01ff0101ff, 0x0101ff01ff01ff01, 0x0101ff01ff01ffff, + 0x0101ff01ffff0101, 0x0101ff01ffff01ff, 0x0101ff01ffffff01, 0x0101ff01ffffffff, + 0x0101ffff01010101, 0x0101ffff010101ff, 0x0101ffff0101ff01, 0x0101ffff0101ffff, + 0x0101ffff01ff0101, 0x0101ffff01ff01ff, 0x0101ffff01ffff01, 0x0101ffff01ffffff, + 0x0101ffffff010101, 0x0101ffffff0101ff, 0x0101ffffff01ff01, 0x0101ffffff01ffff, + 0x0101ffffffff0101, 0x0101ffffffff01ff, 0x0101ffffffffff01, 0x0101ffffffffffff, + 0x01ff010101010101, 0x01ff0101010101ff, 0x01ff01010101ff01, 0x01ff01010101ffff, + 0x01ff010101ff0101, 0x01ff010101ff01ff, 0x01ff010101ffff01, 0x01ff010101ffffff, + 0x01ff0101ff010101, 0x01ff0101ff0101ff, 0x01ff0101ff01ff01, 0x01ff0101ff01ffff, + 0x01ff0101ffff0101, 0x01ff0101ffff01ff, 0x01ff0101ffffff01, 0x01ff0101ffffffff, + 0x01ff01ff01010101, 0x01ff01ff010101ff, 0x01ff01ff0101ff01, 0x01ff01ff0101ffff, + 0x01ff01ff01ff0101, 0x01ff01ff01ff01ff, 0x01ff01ff01ffff01, 0x01ff01ff01ffffff, + 0x01ff01ffff010101, 0x01ff01ffff0101ff, 0x01ff01ffff01ff01, 0x01ff01ffff01ffff, + 0x01ff01ffffff0101, 0x01ff01ffffff01ff, 0x01ff01ffffffff01, 0x01ff01ffffffffff, + 0x01ffff0101010101, 0x01ffff01010101ff, 0x01ffff010101ff01, 0x01ffff010101ffff, + 0x01ffff0101ff0101, 0x01ffff0101ff01ff, 0x01ffff0101ffff01, 0x01ffff0101ffffff, + 0x01ffff01ff010101, 0x01ffff01ff0101ff, 0x01ffff01ff01ff01, 0x01ffff01ff01ffff, + 0x01ffff01ffff0101, 0x01ffff01ffff01ff, 0x01ffff01ffffff01, 0x01ffff01ffffffff, + 0x01ffffff01010101, 0x01ffffff010101ff, 0x01ffffff0101ff01, 0x01ffffff0101ffff, + 0x01ffffff01ff0101, 0x01ffffff01ff01ff, 0x01ffffff01ffff01, 0x01ffffff01ffffff, + 0x01ffffffff010101, 0x01ffffffff0101ff, 0x01ffffffff01ff01, 0x01ffffffff01ffff, + 0x01ffffffffff0101, 0x01ffffffffff01ff, 0x01ffffffffffff01, 0x01ffffffffffffff, + 0xff01010101010101, 0xff010101010101ff, 0xff0101010101ff01, 0xff0101010101ffff, + 0xff01010101ff0101, 0xff01010101ff01ff, 0xff01010101ffff01, 0xff01010101ffffff, + 0xff010101ff010101, 0xff010101ff0101ff, 0xff010101ff01ff01, 0xff010101ff01ffff, + 0xff010101ffff0101, 0xff010101ffff01ff, 0xff010101ffffff01, 0xff010101ffffffff, + 0xff0101ff01010101, 0xff0101ff010101ff, 0xff0101ff0101ff01, 0xff0101ff0101ffff, + 0xff0101ff01ff0101, 0xff0101ff01ff01ff, 0xff0101ff01ffff01, 0xff0101ff01ffffff, + 0xff0101ffff010101, 0xff0101ffff0101ff, 0xff0101ffff01ff01, 0xff0101ffff01ffff, + 0xff0101ffffff0101, 0xff0101ffffff01ff, 0xff0101ffffffff01, 0xff0101ffffffffff, + 0xff01ff0101010101, 0xff01ff01010101ff, 0xff01ff010101ff01, 0xff01ff010101ffff, + 0xff01ff0101ff0101, 0xff01ff0101ff01ff, 0xff01ff0101ffff01, 0xff01ff0101ffffff, + 0xff01ff01ff010101, 0xff01ff01ff0101ff, 0xff01ff01ff01ff01, 
0xff01ff01ff01ffff, + 0xff01ff01ffff0101, 0xff01ff01ffff01ff, 0xff01ff01ffffff01, 0xff01ff01ffffffff, + 0xff01ffff01010101, 0xff01ffff010101ff, 0xff01ffff0101ff01, 0xff01ffff0101ffff, + 0xff01ffff01ff0101, 0xff01ffff01ff01ff, 0xff01ffff01ffff01, 0xff01ffff01ffffff, + 0xff01ffffff010101, 0xff01ffffff0101ff, 0xff01ffffff01ff01, 0xff01ffffff01ffff, + 0xff01ffffffff0101, 0xff01ffffffff01ff, 0xff01ffffffffff01, 0xff01ffffffffffff, + 0xffff010101010101, 0xffff0101010101ff, 0xffff01010101ff01, 0xffff01010101ffff, + 0xffff010101ff0101, 0xffff010101ff01ff, 0xffff010101ffff01, 0xffff010101ffffff, + 0xffff0101ff010101, 0xffff0101ff0101ff, 0xffff0101ff01ff01, 0xffff0101ff01ffff, + 0xffff0101ffff0101, 0xffff0101ffff01ff, 0xffff0101ffffff01, 0xffff0101ffffffff, + 0xffff01ff01010101, 0xffff01ff010101ff, 0xffff01ff0101ff01, 0xffff01ff0101ffff, + 0xffff01ff01ff0101, 0xffff01ff01ff01ff, 0xffff01ff01ffff01, 0xffff01ff01ffffff, + 0xffff01ffff010101, 0xffff01ffff0101ff, 0xffff01ffff01ff01, 0xffff01ffff01ffff, + 0xffff01ffffff0101, 0xffff01ffffff01ff, 0xffff01ffffffff01, 0xffff01ffffffffff, + 0xffffff0101010101, 0xffffff01010101ff, 0xffffff010101ff01, 0xffffff010101ffff, + 0xffffff0101ff0101, 0xffffff0101ff01ff, 0xffffff0101ffff01, 0xffffff0101ffffff, + 0xffffff01ff010101, 0xffffff01ff0101ff, 0xffffff01ff01ff01, 0xffffff01ff01ffff, + 0xffffff01ffff0101, 0xffffff01ffff01ff, 0xffffff01ffffff01, 0xffffff01ffffffff, + 0xffffffff01010101, 0xffffffff010101ff, 0xffffffff0101ff01, 0xffffffff0101ffff, + 0xffffffff01ff0101, 0xffffffff01ff01ff, 0xffffffff01ffff01, 0xffffffff01ffffff, + 0xffffffffff010101, 0xffffffffff0101ff, 0xffffffffff01ff01, 0xffffffffff01ffff, + 0xffffffffffff0101, 0xffffffffffff01ff, 0xffffffffffffff01, 0xffffffffffffffff, +}; + +struct SignHelper { + + IQK_ALWAYS_INLINE void apply_signs_1x(uint8x16_t * b, const uint8_t * sign_bits) const { + auto s = vreinterpretq_s8_u64(uint64x2_t{kall_signs[sign_bits[0]], kall_signs[sign_bits[1]]}); + // Normally we would expect this to be faster, but it isn't. + // auto aux = vcombine_u8(vdup_n_u8(sign_bits[0]), vdup_n_u8(sign_bits[1])); + // auto s = vreinterpretq_s8_u8(vorrq_u8(vceqq_u8(vandq_u8(aux, smask), smask), m1)); + b[0] = vreinterpretq_u8_s8(vmulq_s8(vreinterpretq_s8_u8(b[0]), s)); + } + + // We would need these two if we weren't loading from the unpacked sign table. 
+ //const uint8x16_t smask = vreinterpretq_u8_u64(vdupq_n_u64(0x8040201008040201)); + //const uint8x16_t m1 = vdupq_n_u8(1); +}; + +struct DequantizerIQ2S final : public BaseDequantizer { + DequantizerIQ2S(const void * vx, size_t bx, int nrc) : BaseDequantizer(vx, bx, nrc) {} + + constexpr static int num_blocks() { return 16; } + constexpr static bool should_scale_quants() { return false; } + + SimpleBits bits; + float d; + + inline int32x4x4_t new_block(int i) { + d = 0.125f * GGML_FP16_TO_FP32(x[i].d); + prepare_internal(i, 0, bits); + return prepare_4bit_scales16(x[i].scales); + } + + inline void prepare(int i, int j) { + if (j == 1) prepare_internal(i, 1, bits); + } + +private: + + static void make4(const SignHelper& sh, const uint8_t * sign_bits, const uint8_t * qs, const uint8_t * qh, uint8x16_t * b) { + uint32_t aux32[2]; + const uint16_t * aux16 = (const uint16_t *)aux32; + for (int k = 0; k < 2; ++k) { + aux32[1] = (qh[k] << 4) | (qh[k] << 18); + aux32[0] = (aux32[1] << 4) & 0x03000300; + aux32[1] &= 0x03000300; + b[2*k+0] = vcombine_u8(vld1_u8((const uint8_t *)(iq2s_grid + (qs[4*k+0] | aux16[0]))), + vld1_u8((const uint8_t *)(iq2s_grid + (qs[4*k+1] | aux16[1])))); + b[2*k+1] = vcombine_u8(vld1_u8((const uint8_t *)(iq2s_grid + (qs[4*k+2] | aux16[2]))), + vld1_u8((const uint8_t *)(iq2s_grid + (qs[4*k+3] | aux16[3])))); + sh.apply_signs_1x(b+2*k+0, sign_bits); sign_bits += 2; + sh.apply_signs_1x(b+2*k+1, sign_bits); sign_bits += 2; + } + } + + void prepare_internal(int i, int j, SimpleBits& sb) { + + const auto * qs = x[i].qs + 16*j; + const auto * qh = x[i].qh + 4*j; + const auto * sign_bits = qs + QK_K/8; + + make4(sh, sign_bits+0, qs+0, qh+0, sb.b1.val); + make4(sh, sign_bits+8, qs+8, qh+2, sb.b2.val); + } + + SignHelper sh; +}; + +struct DequantizerIQ3XXS final : public BaseDequantizer { + DequantizerIQ3XXS(const void * vx, size_t bx, int nrc) : BaseDequantizer(vx, bx, nrc) {} + + IQK_ALWAYS_INLINE float new_block(int i) const { return 0.25f * GGML_FP16_TO_FP32(x[i].d); } + + inline int32x4_t unpack(int i, int j, uint8x16_t * q) const { + auto q3data = vld1q_u8_x2(x[i].qs + 32*j); + auto gas = vld1q_u32((const uint32_t *)(x[i].qs + QK_K/4 + 16*j)); + prepare_block((const uint8_t *)q3data.val, (const uint32_t *)&gas, q); + return prepare_scales_8(gas); + } + +private: + + inline static void make2(const uint8_t * q3, const uint32_t sidx, uint8x16_t * b) { + b[0] = vreinterpretq_u8_u32(uint32x4_t{iq3xxs_grid[q3[0]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[3]]}); + b[1] = vreinterpretq_u8_u32(uint32x4_t{iq3xxs_grid[q3[4]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[7]]}); + apply_signs_2(b, keven_signs, sidx); + } + inline static void prepare_block(const uint8_t * q3, const uint32_t * signs, uint8x16_t * quants) { + make2(q3+ 0, signs[0], quants + 0); + make2(q3+ 8, signs[1], quants + 2); + make2(q3+16, signs[2], quants + 4); + make2(q3+24, signs[3], quants + 6); + } +}; + +struct DequantizerIQ3S final : public BaseDequantizer { + DequantizerIQ3S(const void * vx, size_t bx, int nrc) : BaseDequantizer(vx, bx, nrc) {} + + constexpr static int num_blocks() { return 8; } + constexpr static bool should_scale_quants() { return false; } + + SimpleBits bits; + float d; + + inline int32x4x2_t new_block(int i) { + d = GGML_FP16_TO_FP32(x[i].d); + uint32_t scales32[2]; + auto qs = vld1q_u8_x2(x[i].qs); + auto signs = vld1q_u8(x[i].signs); + + prepare_block((const uint8_t *)qs.val, x[i].qh, (const uint8_t *)&signs); + + std::memcpy(scales32, x[i].scales, 4); + 
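+        // x[i].scales packs eight 4-bit block scales, two per byte; the next two
+        // lines expand each nibble s to the odd scale 2*s + 1 (shift left, or in 1).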
scales32[1] = (((scales32[0] >> 4) & 0x0f0f0f0f) << 1) | 0x01010101; + scales32[0] = ((scales32[0] & 0x0f0f0f0f) << 1) | 0x01010101; + auto scales8 = vld1_u8((const uint8_t *)scales32); // 0, 2, 4, 6, 1, 3, 5, 7 + scales8 = vtbl1_u8(scales8, vreinterpret_u8_u64(vdup_n_u64(0x0703060205010400))); + auto scales16 = vreinterpretq_s16_u16(vmovl_u8(scales8)); + int32x4x2_t scales; + scales.val[0] = vmovl_s16(vget_low_s16(scales16)); + scales.val[1] = vmovl_s16(vget_high_s16(scales16)); + return scales; + } + + inline void prepare(int i, int j) { + if (j == 1) { + auto qs = vld1q_u8_x2(x[i].qs + 32); + auto signs = vld1q_u8(x[i].signs + 16); + prepare_block((const uint8_t *)qs.val, x[i].qh + 4, (const uint8_t *)&signs); + } + } + +private: + + static inline void make2(const SignHelper& sh, const uint8_t * sign_bits, const uint16x8_t& idx_l, uint8_t qh, + const int16x8_t& hshift, uint8x16_t * b) { + auto vindex = vorrq_u16(idx_l, vandq_u16(vshlq_u16(vdupq_n_u16(qh), hshift), vdupq_n_u16(256))); + const uint16_t * idx = (const uint16_t *)&vindex; + b[0] = vreinterpretq_u8_u32(uint32x4_t{iq3s_grid[idx[0]], iq3s_grid[idx[1]], iq3s_grid[idx[2]], iq3s_grid[idx[3]]}); + sh.apply_signs_1x(b+0, sign_bits+0); + b[1] = vreinterpretq_u8_u32(uint32x4_t{iq3s_grid[idx[4]], iq3s_grid[idx[5]], iq3s_grid[idx[6]], iq3s_grid[idx[7]]}); + sh.apply_signs_1x(b+1, sign_bits+2); + } + static inline void make4(const SignHelper& sh, const uint8_t * sign_bits, const uint8_t * qs, const uint8_t * qh, + const int16x8_t& hshift, uint8x16_t * b) { + auto idx_l = vld1q_u8(qs); + make2(sh, sign_bits+0, vmovl_u8(vget_low_u8 (idx_l)), qh[0], hshift, b+0); + make2(sh, sign_bits+4, vmovl_u8(vget_high_u8(idx_l)), qh[1], hshift, b+2); + } + + static int16x8_t load_shift() { + static const int16_t k_shift[8] = {8, 7, 6, 5, 4, 3, 2, 1}; + return vld1q_s16(k_shift); + } + + inline void prepare_block(const uint8_t * qs, const uint8_t * qh, const uint8_t * sign_bits) { + auto signs = vld1q_u8(sign_bits); + auto s = (const uint8_t *)&signs; + make4(sh, s + 0, qs+ 0, qh+0, hshift, bits.b1.val); + make4(sh, s + 8, qs+16, qh+2, hshift, bits.b2.val); + } + + SignHelper sh; + const int16x8_t hshift = load_shift(); + +}; + +template +IQK_NOINLINE void mul_mat_qX_K_q8_K_IQXXS(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) { + assert(n % QK_K == 0); + const int nb = n / QK_K; + + Q8 q8(info); + Dequantizer deq(vx, bx, nrc_y); + uint8x16_t qx[8]; + int32x4_t sumi[nrc_y]; + float32x4_t acc[nrc_y]; + + for (int ix = 0; ix < nrc_x; ++ix) { + + deq.new_row(ix); + for (int iy = 0; iy < nrc_y; ++iy) acc[iy] = vdupq_n_f32(0.f); + + for (int i = 0; i < nb; ++i) { + float d = deq.new_block(i); + auto scales = deq.unpack(i, 0, qx); +#pragma GCC unroll 8 + for (int iy = 0; iy < nrc_y; ++iy) { + sumi[iy] = vdupq_n_s32(0); + compute_8_blocks((const int8x16_t *)qx, q8, scales, iy, i, 0, sumi[iy]); + } + scales = deq.unpack(i, 1, qx); +#pragma GCC unroll 8 + for (int iy = 0; iy < nrc_y; ++iy) { + compute_8_blocks((const int8x16_t *)qx, q8, scales, iy, i, 1, sumi[iy]); + acc[iy] = vmlaq_f32(acc[iy], vdupq_n_f32(d*q8.scale(iy, i)), vcvtq_f32_s32(sumi[iy])); + } + } +#pragma GCC unroll 8 + for (int iy = 0; iy < nrc_y; ++iy) { + info.store(ix, iy, vaddvq_f32(acc[iy])); + } + } +} + +// =========================================== Legacy quants + +template +inline float16x4_t load_scales_q0(const Block * x, ggml_half * aux) { + for (int k = 0; k < 4; ++k) aux[k] = x[k].d; + return vld1_f16((const float16_t *)aux); +} + +template +inline float16x8_t 
load_scales_q1(const Block * x, ggml_half * aux) { + if constexpr (std::is_same_v) { + for (int k = 0; k < 4; ++k) { aux[k] = x[k].d; aux[k+4] = x[k].s; } + } else { + for (int k = 0; k < 4; ++k) { aux[k] = x[k].d; aux[k+4] = x[k].m; } + } + return vld1q_f16((const float16_t *)aux); +} + +struct Q4LegacyBits { + template + inline void prepare(const Block * x) { + for (int i = 0; i < 4; ++i) { + auto q4bits = vld1q_u8(x[i].qs); + b[2*i+0] = vreinterpretq_s8_u8(vandq_u8(q4bits, m4b)); + b[2*i+1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits, 4)); + } + } + inline void prepare1(const uint8_t * qs, int8x16_t * q) const { + auto q4bits = vld1q_u8(qs); + q[0] = vreinterpretq_s8_u8(vandq_u8(q4bits, m4b)); + q[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits, 4)); + } + inline void prepare1(const uint8_t * qs) { + prepare1(qs, b); + } + const uint8x16_t m4b = vdupq_n_u8(0xf); + int8x16_t b[8]; +}; + +// One would think this commented out version would do better than the one below +// because it offers more opportunities to execute instructions in parallel. +// Instead, it runs significantly slower. Why? If the compiler is running out of vector registers +// cannot it just do the sequential version below on its own? +//inline int32x4_t sum_4_blocks(const int8x16_t * b, const int8_t * qs) { +// const auto q8b_1 = vld1q_s8_x2(qs + 0); +// auto p12 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), b[0], q8b_1.val[0]), b[1], q8b_1.val[1]); +// const auto q8b_2 = vld1q_s8_x2(qs + 32); +// auto p34 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), b[2], q8b_2.val[0]), b[3], q8b_2.val[1]); +// auto p1234 = vpaddq_s32(p12, p34); +// const auto q8b_3 = vld1q_s8_x2(qs + 64); +// auto p56 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), b[4], q8b_3.val[0]), b[5], q8b_3.val[1]); +// const auto q8b_4 = vld1q_s8_x2(qs + 96); +// auto p78 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), b[6], q8b_4.val[0]), b[7], q8b_4.val[1]); +// return vpaddq_s32(p1234, vpaddq_s32(p56, p78)); +//} + +inline int32x4_t sum_4_blocks(const int8x16_t * b, const int8_t * qs) { + auto q8b = vld1q_s8_x2(qs + 0); + auto p12 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), b[0], q8b.val[0]), b[1], q8b.val[1]); + q8b = vld1q_s8_x2(qs + 32); + auto p34 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), b[2], q8b.val[0]), b[3], q8b.val[1]); + auto p1234 = vpaddq_s32(p12, p34); + q8b = vld1q_s8_x2(qs + 64); + auto p56 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), b[4], q8b.val[0]), b[5], q8b.val[1]); + q8b = vld1q_s8_x2(qs + 96); + auto p78 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), b[6], q8b.val[0]), b[7], q8b.val[1]); + return vpaddq_s32(p1234, vpaddq_s32(p56, p78)); +} + +template struct Q80 { + + constexpr static int nrc_y = nrc; + + Q80(const DataInfo& info) { + for (int iy = 0; iy < nrc_y; ++iy) y[iy] = (const block_q8_0 *)info.src1_row(iy); + } + + inline const int8_t * quant_data(int iy, int i) const { + const block_q8_0_x4 * y4 = (const block_q8_0_x4 *)y[iy] + i; + return y4->qs; + } + + inline float16x4_t load_scales(int iy, int i) const { + const block_q8_0_x4 * y4 = (const block_q8_0_x4 *)y[iy] + i; + return vld1_f16((const float16_t *)y4->d); + } + + template + inline void process_scales(int i, Dequantizer& deq, float16x4_t * sc16, float32x4_t * /*acc*/) const { + auto qx_scales = deq.new_block(i); + for (int iy = 0; iy < nrc; ++iy) { + auto q8_scales = load_scales(iy, i); + sc16[iy] = vmul_f16(qx_scales, q8_scales); + } + } + + template + inline void process_1_block(int i, Dequantizer& deq, float32x4_t * acc) const { + deq.prepare1(i); + 
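+        // Scalar tail path: the mul_mat_qX_Y_q8_Y drivers below call this for the
+        // last nb%4 blocks, where the x4-packed scale layout no longer applies.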
float d = GGML_FP16_TO_FP32(deq.x[i].d); + for (int iy = 0; iy < nrc; ++iy) { + auto q8b = vld1q_s8_x2(y[iy][i].qs); + auto p = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), deq.bits.b[0], q8b.val[0]), deq.bits.b[1], q8b.val[1]); + acc[iy] = vmlaq_f32(acc[iy], vdupq_n_f32(d*GGML_FP16_TO_FP32(y[iy][i].d)), vcvtq_f32_s32(p)); + } + } + + const block_q8_0 * y[nrc_y]; +}; + +template struct Q81 { + + constexpr static int nrc_y = nrc; + + Q81(const DataInfo& info) { + for (int iy = 0; iy < nrc_y; ++iy) y[iy] = (const block_q8_1 *)info.src1_row(iy); + } + + inline const int8_t * quant_data(int iy, int i) const { + const block_q8_1_x4 * y4 = (const block_q8_1_x4 *)y[iy] + i; + return y4->qs; + } + + inline float16x8_t load_scales(int iy, int i) const { + const block_q8_1_x4 * y4 = (const block_q8_1_x4 *)y[iy] + i; + return vld1q_f16((const float16_t *)y4->d); + } + + template + inline void process_scales(int i, Dequantizer& deq, float16x4_t * sc16, float32x4_t * acc) const { + auto qx_scales = deq.new_block(i); + for (int iy = 0; iy < nrc; ++iy) { + auto q8_scales = load_scales(iy, i); + auto m = vmul_f16(vget_high_f16(qx_scales), vget_high_f16(q8_scales)); + acc[iy] = vaddq_f32(acc[iy], vcvt_f32_f16(m)); + sc16[iy] = vmul_f16(vget_low_f16(qx_scales), vget_low_f16(q8_scales)); + } + } + + template + inline void process_1_block(int i, Dequantizer& deq, float32x4_t * acc) const { + deq.prepare1(i); + float d = GGML_FP16_TO_FP32(deq.x[i].d), m = 0.25f*GGML_FP16_TO_FP32(deq.x[i].m); + for (int iy = 0; iy < nrc; ++iy) { + auto q8b = vld1q_s8_x2(y[iy][i].qs); + auto p = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), deq.bits.b[0], q8b.val[0]), deq.bits.b[1], q8b.val[1]); + acc[iy] = vmlaq_f32(acc[iy], vdupq_n_f32(d*GGML_FP16_TO_FP32(y[iy][i].d)), vcvtq_f32_s32(p)); + acc[iy] = vaddq_f32(acc[iy], vdupq_n_f32(m*GGML_FP16_TO_FP32(y[iy][i].s))); + } + } + + const block_q8_1 * y[nrc_y]; +}; + +template +struct BaseLegacyDequantizer { + + BaseLegacyDequantizer(const void * vx, size_t bx) : vx(vx), x(nullptr), bx(bx) {} + + inline void new_row(int ix) { x = (const block_q *)((const char *)vx + bx*ix); } + + Q4LegacyBits bits; + + const void * vx; + const block_q * x; + size_t bx; +}; + +struct DequantizerQ40 final : public BaseLegacyDequantizer { + + DequantizerQ40(const void * vx, size_t bx) : BaseLegacyDequantizer(vx, bx) {} + + inline void prepare1(int i, int8x16_t * q) const { + bits.prepare1(x[i].qs, q); + q[0] = vaddq_s8(q[0], m8); + q[1] = vaddq_s8(q[1], m8); + } + inline void prepare1(int i) { + prepare1(i, bits.b); + } + + inline float16x4_t new_block(int i) { + ggml_half aux[4]; + for (int k = 0; k < 4; ++k) { + aux[k] = x[4*i+k].d; + prepare1(4*i+k, bits.b + 2*k); + } + return vld1_f16((const float16_t *)aux); + } + + const int8x16_t m8 = vdupq_n_s8(-8); + //ggml_half aux[4]; +}; + +struct DequantizerQ41 : public BaseLegacyDequantizer { + + DequantizerQ41(const void * vx, size_t bx) : BaseLegacyDequantizer(vx, bx) {} + + inline void prepare1(int i) { + bits.prepare1(x[i].qs); + } + + inline float16x8_t new_block(int i) { + uint32_t aux32[4]; + const uint32_t * s32 = (const uint32_t *)&x[4*i].d; + for (int k = 0; k < 4; ++k) { + aux32[k] = *s32; s32 += sizeof(block_q4_1)/4; + bits.prepare1(x[4*i+k].qs, bits.b + 2*k); + } + return vreinterpretq_f16_u8(vqtbl1q_u8(vld1q_u8((const uint8_t *)aux32), vreinterpretq_u8_u64(shuffle))); + } + // Leaving this commented out attempt to be reminded that I already tried this. + // It has basically the same performance as the version above. 
+ //inline float16x8_t new_block(int i) { + // uint32x4_t scales = {}; + // const block_q4_1 * xi = x + 4*i; + // const uint32_t * s32 = (const uint32_t *)&xi->d; + // scales = vsetq_lane_u32(*s32, scales, 0); s32 += sizeof(block_q4_1)/4; + // bits.prepare1(xi[0].qs, bits.b + 0); + // scales = vsetq_lane_u32(*s32, scales, 1); s32 += sizeof(block_q4_1)/4; + // bits.prepare1(xi[1].qs, bits.b + 2); + // scales = vsetq_lane_u32(*s32, scales, 2); s32 += sizeof(block_q4_1)/4; + // bits.prepare1(xi[2].qs, bits.b + 4); + // scales = vsetq_lane_u32(*s32, scales, 3); + // bits.prepare1(xi[3].qs, bits.b + 6); + // return vreinterpretq_f16_u8(vqtbl1q_u8(vreinterpretq_u8_u32(scales), vreinterpretq_u8_u64(shuffle))); + //} + + const uint64x2_t shuffle = {0x0d0c090805040100, 0x0f0e0b0a07060302}; +}; + +struct HighBit5Legacy { + inline uint8x16_t to_bytes(const uint8_t * qh) const { + uint8x16_t h = vqtbl1q_u8(vreinterpretq_u8_u16(vdupq_n_u16(*(const uint16_t *)qh)), shuffle); + return vceqq_u8(vandq_u8(h, vreinterpretq_u8_u64(mask)), vreinterpretq_u8_u64(mask)); + } + inline uint8x16_t to_negated_bytes(const uint8_t * qh) const { + uint8x16_t h = vqtbl1q_u8(vreinterpretq_u8_u16(vdupq_n_u16(*(const uint16_t *)qh)), shuffle); + return vceqq_u8(vandq_u8(h, vreinterpretq_u8_u64(mask)), vdupq_n_u8(0)); + } + const uint64x2_t mask = vdupq_n_u64(0x8040201008040201); + const uint8x16_t shuffle = vcombine_u8(vdup_n_u8(0), vdup_n_u8(1)); +}; + +struct DequantizerQ50 final : public BaseLegacyDequantizer { + + DequantizerQ50(const void * vx, size_t bx) : BaseLegacyDequantizer(vx, bx) {} + + inline void prepare1(int i, int8x16_t * q) const { + bits.prepare1(x[i].qs, q); + auto qh = x[i].qh; + q[0] = vreinterpretq_s8_u8(vorrq_u8(vreinterpretq_u8_s8(q[0]), vandq_u8(mh, hbits.to_negated_bytes(qh+0)))); + q[1] = vreinterpretq_s8_u8(vorrq_u8(vreinterpretq_u8_s8(q[1]), vandq_u8(mh, hbits.to_negated_bytes(qh+2)))); + } + inline void prepare1(int i) { + prepare1(i, bits.b); + } + + inline float16x4_t new_block(int i) { + ggml_half aux[4]; + for (int k = 0; k < 4; ++k) { + aux[k] = x[4*i+k].d; + prepare1(4*i+k, bits.b + 2*k); + } + return vld1_f16((const float16_t *)aux); + } + + HighBit5Legacy hbits; + + const uint8x16_t mh = vdupq_n_u8(0xf0); + +}; + +struct DequantizerQ80 final : public BaseLegacyDequantizer { + + DequantizerQ80(const void * vx, size_t bx) : BaseLegacyDequantizer(vx, bx) {} + + inline void prepare1(int i) { + bits.b[0] = vld1q_s8(x[i].qs); + bits.b[1] = vld1q_s8(x[i].qs+16); + } + + inline float16x4_t new_block(int i) { + ggml_half aux[4]; + for (int k = 0; k < 4; ++k) { + aux[k] = x[4*i+k].d; + bits.b[2*k+0] = vld1q_s8(x[4*i+k].qs); + bits.b[2*k+1] = vld1q_s8(x[4*i+k].qs+16); + } + return vld1_f16((const float16_t *)aux); + } + +}; + +struct DequantizerQ51 final : public BaseLegacyDequantizer { + + DequantizerQ51(const void * vx, size_t bx) : BaseLegacyDequantizer(vx, bx) {} + + inline void prepare1(int i, int8x16_t * q) const { + bits.prepare1(x[i].qs, q); + auto qh = x[i].qh; + q[0] = vreinterpretq_s8_u8(vorrq_u8(vreinterpretq_u8_s8(q[0]), vandq_u8(mh, hbits.to_bytes(qh+0)))); + q[1] = vreinterpretq_s8_u8(vorrq_u8(vreinterpretq_u8_s8(q[1]), vandq_u8(mh, hbits.to_bytes(qh+2)))); + } + inline void prepare1(int i) { + bits.prepare1(x[i].qs, bits.b); + } + + inline float16x8_t new_block(int i) { + uint32_t aux32[4]; + const uint32_t * s32 = (const uint32_t *)&x[4*i].d; + for (int k = 0; k < 4; ++k) { + aux32[k] = *s32; s32 += sizeof(block_q5_1)/4; + prepare1(4*i+k, bits.b + 2*k); + } + return 
+
+struct DequantizerQ50 final : public BaseLegacyDequantizer<block_q5_0> {
+
+    DequantizerQ50(const void * vx, size_t bx) : BaseLegacyDequantizer(vx, bx) {}
+
+    inline void prepare1(int i, int8x16_t * q) const {
+        bits.prepare1(x[i].qs, q);
+        auto qh = x[i].qh;
+        q[0] = vreinterpretq_s8_u8(vorrq_u8(vreinterpretq_u8_s8(q[0]), vandq_u8(mh, hbits.to_negated_bytes(qh+0))));
+        q[1] = vreinterpretq_s8_u8(vorrq_u8(vreinterpretq_u8_s8(q[1]), vandq_u8(mh, hbits.to_negated_bytes(qh+2))));
+    }
+    inline void prepare1(int i) {
+        prepare1(i, bits.b);
+    }
+
+    inline float16x4_t new_block(int i) {
+        ggml_half aux[4];
+        for (int k = 0; k < 4; ++k) {
+            aux[k] = x[4*i+k].d;
+            prepare1(4*i+k, bits.b + 2*k);
+        }
+        return vld1_f16((const float16_t *)aux);
+    }
+
+    HighBit5Legacy hbits;
+
+    const uint8x16_t mh = vdupq_n_u8(0xf0);
+
+};
+
+struct DequantizerQ80 final : public BaseLegacyDequantizer<block_q8_0> {
+
+    DequantizerQ80(const void * vx, size_t bx) : BaseLegacyDequantizer(vx, bx) {}
+
+    inline void prepare1(int i) {
+        bits.b[0] = vld1q_s8(x[i].qs);
+        bits.b[1] = vld1q_s8(x[i].qs+16);
+    }
+
+    inline float16x4_t new_block(int i) {
+        ggml_half aux[4];
+        for (int k = 0; k < 4; ++k) {
+            aux[k] = x[4*i+k].d;
+            bits.b[2*k+0] = vld1q_s8(x[4*i+k].qs);
+            bits.b[2*k+1] = vld1q_s8(x[4*i+k].qs+16);
+        }
+        return vld1_f16((const float16_t *)aux);
+    }
+
+};
+
+struct DequantizerQ51 final : public BaseLegacyDequantizer<block_q5_1> {
+
+    DequantizerQ51(const void * vx, size_t bx) : BaseLegacyDequantizer(vx, bx) {}
+
+    inline void prepare1(int i, int8x16_t * q) const {
+        bits.prepare1(x[i].qs, q);
+        auto qh = x[i].qh;
+        q[0] = vreinterpretq_s8_u8(vorrq_u8(vreinterpretq_u8_s8(q[0]), vandq_u8(mh, hbits.to_bytes(qh+0))));
+        q[1] = vreinterpretq_s8_u8(vorrq_u8(vreinterpretq_u8_s8(q[1]), vandq_u8(mh, hbits.to_bytes(qh+2))));
+    }
+    inline void prepare1(int i) {
+        bits.prepare1(x[i].qs, bits.b);
+    }
+
+    inline float16x8_t new_block(int i) {
+        uint32_t aux32[4];
+        const uint32_t * s32 = (const uint32_t *)&x[4*i].d;
+        for (int k = 0; k < 4; ++k) {
+            aux32[k] = *s32; s32 += sizeof(block_q5_1)/4;
+            prepare1(4*i+k, bits.b + 2*k);
+        }
+        return vreinterpretq_f16_u8(vqtbl1q_u8(vld1q_u8((const uint8_t *)aux32), vreinterpretq_u8_u64(shuffle)));
+    }
+
+    HighBit5Legacy hbits;
+
+    const uint8x16_t mh = vdupq_n_u8(0x10);
+    const uint64x2_t shuffle = {0x0d0c090805040100, 0x0f0e0b0a07060302};
+
+};
+
+template <typename Dequantizer, typename Q8>
+inline void sum_4(int i, Dequantizer& deq, const Q8& q8, const float16x4_t * sc16, float32x4_t * acc) {
+    for (int iy = 0; iy < Q8::nrc_y; ++iy) {
+        auto pall = sum_4_blocks(deq.bits.b, q8.quant_data(iy, i));
+        auto scale = vcvt_f32_f16(sc16[iy]);
+        acc[iy] = vmlaq_f32(acc[iy], scale, vcvtq_f32_s32(pall));
+    }
+}
+
+template <typename Dequantizer, typename Q8>
+inline void mul_mat_qX_Y_q8_Y(int n, Dequantizer& deq, Q8& q8, const DataInfo& info, int nrc_x) {
+    const int nb = n / QK4_1;
+
+    float16x4_t sc16[Q8::nrc_y];
+
+    for (int ix = 0; ix < nrc_x; ++ix) {
+
+        deq.new_row(ix);
+
+        float32x4_t acc[Q8::nrc_y];
+        for (int iy = 0; iy < Q8::nrc_y; ++iy) acc[iy] = vdupq_n_f32(0.f);
+
+        for (int i = 0; i < nb/4; ++i) {
+            q8.process_scales(i, deq, sc16, acc);
+            sum_4(i, deq, q8, sc16, acc);
+        }
+        for (int i = 4*(nb/4); i < nb; ++i) {
+            q8.process_1_block(i, deq, acc);
+        }
+
+        for (int iy = 0; iy < Q8::nrc_y; ++iy) {
+            info.store(ix, iy, vaddvq_f32(acc[iy]));
+        }
+    }
+}
+
+template <typename Dequantizer, typename Q8>
+inline void mul_mat_qX_Y_q8_Y_1(int n, Dequantizer& deq1, Dequantizer& deq2, Q8& q8, const DataInfo& info, int nrc_x) {
+    const int nb = n / QK4_1;
+
+    float16x4_t sc16[2];
+
+    for (int ix = 0; ix < nrc_x; ++ix) {
+
+        deq1.new_row(ix);
+        deq2.new_row(ix);
+
+        float32x4_t acc[2] = { vdupq_n_f32(0.f), vdupq_n_f32(0.f) };
+
+        for (int i = 0; i < nb/8; ++i) {
+            q8.process_scales(2*i+0, deq1, sc16+0, acc+0);
+            q8.process_scales(2*i+1, deq2, sc16+1, acc+1);
+            sum_4(2*i+0, deq1, q8, sc16+0, acc+0);
+            sum_4(2*i+1, deq2, q8, sc16+1, acc+1);
+        }
+        for (int i = 2*(nb/8); i < nb/4; ++i) {
+            q8.process_scales(i, deq1, sc16, acc);
+            sum_4(i, deq1, q8, sc16, acc);
+        }
+        for (int i = 4*(nb/4); i < nb; ++i) {
+            q8.process_1_block(i, deq1, acc);
+        }
+
+        info.store(ix, 0, vaddvq_f32(vaddq_f32(acc[0], acc[1])));
+    }
+}
+
+template <typename Dequantizer, int nrc_y>
+static void IQK_NOINLINE mul_mat_qX_1_q8_1(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
+    Q81<nrc_y> q8(info);
+    if constexpr (nrc_y == 1) {
+        Dequantizer deq1(vx, bx), deq2(vx, bx);
+        mul_mat_qX_Y_q8_Y_1(n, deq1, deq2, q8, info, nrc_x);
+    } else {
+        Dequantizer deq(vx, bx);
+        mul_mat_qX_Y_q8_Y(n, deq, q8, info, nrc_x);
+    }
+}
+
+template <typename Dequantizer, int nrc_y>
+static void IQK_NOINLINE mul_mat_qX_0_q8_0(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
+    Q80<nrc_y> q8(info);
+    if constexpr (nrc_y == 1) {
+        Dequantizer deq1(vx, bx), deq2(vx, bx);
+        mul_mat_qX_Y_q8_Y_1(n, deq1, deq2, q8, info, nrc_x);
+    } else {
+        Dequantizer deq(vx, bx);
+        mul_mat_qX_Y_q8_Y(n, deq, q8, info, nrc_x);
+    }
+}
+
+template <typename Dequantizer>
+static void IQK_NOINLINE mul_mat_qX_1_q8_1_1(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
+    Dequantizer deq1(vx, bx), deq2(vx, bx);
+    Q81<1> q8(info);
+    mul_mat_qX_Y_q8_Y_1(n, deq1, deq2, q8, info, nrc_x);
+}
+
+template <typename Dequantizer>
+static void IQK_NOINLINE mul_mat_qX_0_q8_0_1(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
+    Dequantizer deq1(vx, bx), deq2(vx, bx);
+    Q80<1> q8(info);
+    mul_mat_qX_Y_q8_Y_1(n, deq1, deq2, q8, info, nrc_x);
+}
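+
+// For orientation, each kernel above accumulates the scalar recurrence below
+// per output element (sketch only; qx/qy/d_x/d_y/m_x/s_y are placeholders for
+// the per-block quants, scales, mins, and the precomputed block sum s stored
+// in block_q8_1 -- the real kernels fold format-specific shifts into these):
+//
+//   float sum = 0;
+//   for (int ib = 0; ib < nb; ++ib) {                 // 32-quant blocks in the row
+//       int p = 0;
+//       for (int j = 0; j < 32; ++j) p += qx[ib][j] * qy[ib][j];
+//       sum += d_x[ib]*d_y[ib]*p + m_x[ib]*s_y[ib];   // m*s term only for _1 types
+//   }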
+
+template <typename Dequantizer> void MulMat::set_functions(MulMat& m) {
+    if constexpr (std::is_same_v<Dequantizer, DequantizerQ40> || std::is_same_v<Dequantizer, DequantizerQ50> ||
+                  std::is_same_v<Dequantizer, DequantizerQ80>) {
+        m.funcs[0] = mul_mat_qX_0_q8_0<Dequantizer, 1>;
+        m.funcs[1] = mul_mat_qX_0_q8_0<Dequantizer, 2>;
+        m.funcs[2] = mul_mat_qX_0_q8_0<Dequantizer, 3>;
+        m.funcs[3] = mul_mat_qX_0_q8_0<Dequantizer, 4>;
+        m.funcs[4] = mul_mat_qX_0_q8_0<Dequantizer, 5>;
+        m.funcs[5] = mul_mat_qX_0_q8_0<Dequantizer, 6>;
+        m.funcs[6] = mul_mat_qX_0_q8_0<Dequantizer, 7>;
+        m.funcs[7] = mul_mat_qX_0_q8_0<Dequantizer, 8>;
+    }
+    else if constexpr (std::is_same_v<Dequantizer, DequantizerQ41> || std::is_same_v<Dequantizer, DequantizerQ51>) {
+        m.funcs[0] = mul_mat_qX_1_q8_1<Dequantizer, 1>;
+        m.funcs[1] = mul_mat_qX_1_q8_1<Dequantizer, 2>;
+        m.funcs[2] = mul_mat_qX_1_q8_1<Dequantizer, 3>;
+        m.funcs[3] = mul_mat_qX_1_q8_1<Dequantizer, 4>;
+        m.funcs[4] = mul_mat_qX_1_q8_1<Dequantizer, 5>;
+        m.funcs[5] = mul_mat_qX_1_q8_1<Dequantizer, 6>;
+        m.funcs[6] = mul_mat_qX_1_q8_1<Dequantizer, 7>;
+        m.funcs[7] = mul_mat_qX_1_q8_1<Dequantizer, 8>;
+    }
+    else if constexpr (std::is_same_v<Dequantizer, DequantizerIQ2XXS> || std::is_same_v<Dequantizer, DequantizerIQ3XXS>) {
+        m.funcs[0] = mul_mat_qX_K_q8_K_IQXXS<1, Dequantizer>;
+        m.funcs[1] = mul_mat_qX_K_q8_K_IQXXS<2, Dequantizer>;
+        m.funcs[2] = mul_mat_qX_K_q8_K_IQXXS<3, Dequantizer>;
+        m.funcs[3] = mul_mat_qX_K_q8_K_IQXXS<4, Dequantizer>;
+        m.funcs[4] = mul_mat_qX_K_q8_K_IQXXS<5, Dequantizer>;
+        m.funcs[5] = mul_mat_qX_K_q8_K_IQXXS<6, Dequantizer>;
+        m.funcs[6] = mul_mat_qX_K_q8_K_IQXXS<7, Dequantizer>;
+        m.funcs[7] = mul_mat_qX_K_q8_K_IQXXS<8, Dequantizer>;
+    }
+    else if constexpr (std::is_same_v<Dequantizer, DequantizerIQ2XS> ||
+                       std::is_same_v<Dequantizer, DequantizerIQ2S> ||
+                       std::is_same_v<Dequantizer, DequantizerIQ3S>) {
+        m.funcs[0] = mul_mat_qX_K_q8_K_IQ<1, Dequantizer>;
+        m.funcs[1] = mul_mat_qX_K_q8_K_IQ<2, Dequantizer>;
+        m.funcs[2] = mul_mat_qX_K_q8_K_IQ<3, Dequantizer>;
+        m.funcs[3] = mul_mat_qX_K_q8_K_IQ<4, Dequantizer>;
+        m.funcs[4] = mul_mat_qX_K_q8_K_IQ<5, Dequantizer>;
+        m.funcs[5] = mul_mat_qX_K_q8_K_IQ<6, Dequantizer>;
+        m.funcs[6] = mul_mat_qX_K_q8_K_IQ<7, Dequantizer>;
+        m.funcs[7] = mul_mat_qX_K_q8_K_IQ<8, Dequantizer>;
+    }
+    else {
+        m.funcs[0] = mul_mat_qX_K_q8_K_T<1, Dequantizer>;
+        m.funcs[1] = mul_mat_qX_K_q8_K_T<2, Dequantizer>;
+        m.funcs[2] = mul_mat_qX_K_q8_K_T<3, Dequantizer>;
+        m.funcs[3] = mul_mat_qX_K_q8_K_T<4, Dequantizer>;
+        m.funcs[4] = mul_mat_qX_K_q8_K_T<5, Dequantizer>;
+        m.funcs[5] = mul_mat_qX_K_q8_K_T<6, Dequantizer>;
+        m.funcs[6] = mul_mat_qX_K_q8_K_T<7, Dequantizer>;
+        m.funcs[7] = mul_mat_qX_K_q8_K_T<8, Dequantizer>;
+    }
+}
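+
+// The funcs table set up above is indexed by the number of right-hand-side
+// columns minus one: funcs[0] handles one column, funcs[7] handles eight.
+// A rough sketch of a call site (illustrative only; the real driver lives in
+// the iqk_mul_mat entry point, and vx/bx/info name its usual arguments):
+//
+//   MulMat mm; int row_size_q8;
+//   if (!MulMat::set_mul_mat(typeA, ne00, mm, row_size_q8, Ny)) return false;
+//   int ny = Ny < 8 ? Ny : 8;
+//   mm.funcs[ny-1](ne00, vx, bx, info, nrc_x);   // process ny columns at once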
+
+bool MulMat::set_mul_mat(int typeA, int ne00, MulMat& m, int& row_size_q8, int Ny) {
+    row_size_q8 = ggml_row_size(GGML_TYPE_Q8_K, ne00);
+
+    (void)Ny;
+    // Uncommenting this would disable iqk_mul_mat for matrix x vector multiplications.
+    //if (Ny == 1 && (typeA == GGML_TYPE_IQ2_XXS || typeA == GGML_TYPE_IQ2_XS || typeA == GGML_TYPE_IQ2_S ||
+    //                typeA == GGML_TYPE_IQ3_XXS || typeA == GGML_TYPE_IQ3_S)) return false;
+
+    switch (typeA) {
+        case GGML_TYPE_Q2_K:
+            MulMat::set_functions<DequantizerQ2K>(m);
+            break;
+        case GGML_TYPE_Q3_K:
+            MulMat::set_functions<DequantizerQ3K>(m);
+            break;
+        case GGML_TYPE_Q4_K:
+            MulMat::set_functions<DequantizerQ4K>(m);
+            break;
+        case GGML_TYPE_Q5_K:
+            MulMat::set_functions<DequantizerQ5K>(m);
+            break;
+        case GGML_TYPE_Q6_K:
+            MulMat::set_functions<DequantizerQ6K>(m);
+            break;
+        case GGML_TYPE_IQ4_XS:
+            MulMat::set_functions<DequantizerIQ4XS>(m);
+            break;
+        case GGML_TYPE_IQ3_S:
+            MulMat::set_functions<DequantizerIQ3S>(m);
+            break;
+        case GGML_TYPE_IQ3_XXS:
+            MulMat::set_functions<DequantizerIQ3XXS>(m);
+            break;
+        case GGML_TYPE_IQ2_S:
+            MulMat::set_functions<DequantizerIQ2S>(m);
+            break;
+        case GGML_TYPE_IQ2_XS:
+            MulMat::set_functions<DequantizerIQ2XS>(m);
+            break;
+        case GGML_TYPE_IQ2_XXS:
+            MulMat::set_functions<DequantizerIQ2XXS>(m);
+            break;
+        case GGML_TYPE_Q4_0:
+            MulMat::set_functions<DequantizerQ40>(m);
+            row_size_q8 = ggml_row_size(GGML_TYPE_Q8_0, ne00);
+            break;
+        case GGML_TYPE_Q4_1:
+            MulMat::set_functions<DequantizerQ41>(m);
+            row_size_q8 = ggml_row_size(GGML_TYPE_Q8_1, ne00);
+            break;
+        case GGML_TYPE_Q5_0:
+            MulMat::set_functions<DequantizerQ50>(m);
+            row_size_q8 = ggml_row_size(GGML_TYPE_Q8_0, ne00);
+            break;
+        case GGML_TYPE_Q5_1:
+            MulMat::set_functions<DequantizerQ51>(m);
+            row_size_q8 = ggml_row_size(GGML_TYPE_Q8_1, ne00);
+            break;
+        case GGML_TYPE_Q8_0:
+            MulMat::set_functions<DequantizerQ80>(m);
+            row_size_q8 = ggml_row_size(GGML_TYPE_Q8_0, ne00);
+            break;
+        default:
+            return false;
+    }
+    return true;
+}
+
+}
+
+#endif // __x86_64__ or __aarch64__
diff --git a/third_party/llamafile/iqk_mul_mat_amd_avx2.cpp b/third_party/llamafile/iqk_mul_mat_amd_avx2.cpp
new file mode 100644
index 0000000..9e3de18
--- /dev/null
+++ b/third_party/llamafile/iqk_mul_mat_amd_avx2.cpp
@@ -0,0 +1,8 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/iqk_mul_mat_amd_avx2.cpp
+// Copyright 2024 Iwan Kawrakow.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+#ifdef __x86_64__
+#include "iqk_mul_mat.inc"
+#endif // __x86_64__
diff --git a/third_party/llamafile/iqk_mul_mat_amd_zen4.cpp b/third_party/llamafile/iqk_mul_mat_amd_zen4.cpp
new file mode 100644
index 0000000..4d0a979
--- /dev/null
+++ b/third_party/llamafile/iqk_mul_mat_amd_zen4.cpp
@@ -0,0 +1,10 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/iqk_mul_mat_amd_zen4.cpp
+// Copyright 2024 Iwan Kawrakow.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+#ifdef __x86_64__
+#define iqk_mul_mat iqk_mul_mat_zen4
+#define iqk_mul_mat_moe iqk_mul_mat_moe_zen4
+#include "iqk_mul_mat.inc"
+#endif // __x86_64__
diff --git a/third_party/llamafile/iqk_mul_mat_arm82.cpp b/third_party/llamafile/iqk_mul_mat_arm82.cpp
new file mode 100644
index 0000000..97a0fa1
--- /dev/null
+++ b/third_party/llamafile/iqk_mul_mat_arm82.cpp
@@ -0,0 +1,10 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/iqk_mul_mat_arm82.cpp
+// Copyright 2024 Iwan Kawrakow.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+#ifdef __aarch64__
+#define iqk_mul_mat iqk_mul_mat_arm82
+#define iqk_mul_mat_moe iqk_mul_mat_moe_arm82
+#include "iqk_mul_mat.inc"
+#endif // __aarch64__
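+
+// Note on the pattern above: all of these wrappers compile the very same
+// iqk_mul_mat.inc, each in a translation unit built with different ISA flags,
+// renaming the entry points via #define so that independent symbols exist
+// side by side. A sketch of the consumer side (illustrative only; the real
+// selection logic is the GemmFuncs constructor in sgemm.cpp below):
+//
+//   typedef bool iqk_mul_mat_t(long, long, long, int, const void*,
+//                              const void*, float*, long, int, int);
+//   static iqk_mul_mat_t* pick_iqk_kernel() {
+//   #ifdef __aarch64__
+//       if (getauxval(AT_HWCAP) & HWCAP_ASIMDDP)   // dotprod ISA present?
+//           return iqk_mul_mat_arm82;
+//   #endif
+//       return iqk_mul_mat;                        // baseline build of the .inc
+//   }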
diff --git a/third_party/llamafile/macros.h b/third_party/llamafile/macros.h
new file mode 100644
index 0000000..a96c0cf
--- /dev/null
+++ b/third_party/llamafile/macros.h
@@ -0,0 +1,14 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/macros.h
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+// -*- mode:c++;indent-tabs-mode:nil;c-basic-offset:4;coding:utf-8 -*-
+// vi: set et ft=cpp ts=4 sts=4 sw=4 fenc=utf-8 :vi
+#pragma once
+
+#define MIN(X, Y) ((Y) > (X) ? (X) : (Y))
+#define MAX(X, Y) ((Y) < (X) ? (X) : (Y))
+#define CEIL_DIV(M, N) (((M) + (N) - 1) / (N))
+#define ROUNDUP(X, K) (((X) + (K) - 1) & -(K))
+#define ARRAYLEN(A) ((sizeof(A) / sizeof(*(A))) / ((unsigned)!(sizeof(A) % sizeof(*(A)))))
diff --git a/third_party/llamafile/micros.h b/third_party/llamafile/micros.h
new file mode 100644
index 0000000..d451322
--- /dev/null
+++ b/third_party/llamafile/micros.h
@@ -0,0 +1,41 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/micros.h
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+// -*- mode:c++;indent-tabs-mode:nil;c-basic-offset:4;coding:utf-8 -*-
+// vi: set et ft=cpp ts=4 sts=4 sw=4 fenc=utf-8 :vi
+#pragma once
+
+#include <stdint.h>
+
+#ifndef _WIN32
+#include <time.h>
+#else
+#include <windows.h>
+#endif
+
+#ifdef _WIN32
+static long long GetQueryPerformanceFrequency() {
+    LARGE_INTEGER t;
+    QueryPerformanceFrequency(&t);
+    return t.QuadPart;
+}
+static long long GetQueryPerformanceCounter() {
+    LARGE_INTEGER t;
+    QueryPerformanceCounter(&t);
+    return t.QuadPart;
+}
+#endif
+
+static long long micros(void) {
+#ifndef _WIN32
+    struct timespec ts;
+    clock_gettime(CLOCK_REALTIME, &ts);
+    return ts.tv_sec * 1000000 + (ts.tv_nsec + 999) / 1000;
+#else
+    static long long timer_freq = GetQueryPerformanceFrequency();
+    static long long timer_start = GetQueryPerformanceCounter();
+    return ((GetQueryPerformanceCounter() - timer_start) * 1000000) / timer_freq;
+#endif
+}
diff --git a/third_party/llamafile/numba.h b/third_party/llamafile/numba.h
new file mode 100644
index 0000000..80a2085
--- /dev/null
+++ b/third_party/llamafile/numba.h
@@ -0,0 +1,59 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/numba.h
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+#pragma once
+
+inline int rand32(void) {
+    static unsigned long long lcg = 1;
+    lcg *= 6364136223846793005;
+    lcg += 1442695040888963407;
+    return lcg >> 32;
+}
+
+inline int popcount(unsigned x) {
+    x = x - ((x >> 1) & 0x55555555);
+    x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
+    x = (x + (x >> 4)) & 0x0F0F0F0F;
+    x = (x + (x >> 16));
+    return (x + (x >> 8)) & 0x0000003F;
+}
+
+inline int hamming(int x, int y) {
+    return popcount(x ^ y);
+}
+
+inline float float01(unsigned x) { // (0,1)
+    return 1.f / 8388608 * ((x >> 9) + .5f);
+}
+
+inline float numba(void) { // (-1,1)
+    return float01(rand32()) * 2.f - 1.f;
+}
+
+template <typename T>
+void randomize(T* A, int n) {
+    for (int i = 0; i < n; ++i)
+        A[i] = numba();
+}
+
+template <typename T>
+void randomize(int m, int n, T* A, int lda) {
+    for (int j = 0; j < n; ++j)
+        for (int i = 0; i < m; ++i)
+            A[lda * j + i] = numba();
+}
+
+template <typename T, typename U>
+void broadcast(T* A, int n, U x) {
+    for (int i = 0; i < n; ++i)
+        A[i] = x;
+}
+
+template <typename T, typename U>
+void broadcast(int m, int n, T* A, int lda, U x) {
+    for (int j = 0; j < n; ++j)
+        for (int i = 0; i < m; ++i)
+            A[lda * j + i] = x;
+}
diff --git a/third_party/llamafile/sgemm.cpp b/third_party/llamafile/sgemm.cpp
new file mode 100644
index 0000000..7ec34ff
--- /dev/null
+++ b/third_party/llamafile/sgemm.cpp
@@ -0,0 +1,200 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/sgemm.cpp
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved. + +// -*- mode:c++;indent-tabs-mode:nil;c-basic-offset:4;coding:utf-8 -*- +// vi: set et ft=cpp ts=4 sts=4 sw=4 fenc=utf-8 :vi +// +// Copyright 2024 Mozilla Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "sgemm.h" +// #include +#include +// #include +#include +#include +#include +// #include "llamafile.h" + +static const struct GemmFuncs { + typeof(llamafile_sgemm)* sgemm; + typeof(llamafile_mixmul)* mixmul; + typeof(llamafile_mixmul_iqk)* iqk_mixmul = iqk_mul_mat_moe_unsupported; + GemmFuncs() { +#ifdef __x86_64__ + // if (X86_HAVE(AVX)) { + // if (X86_HAVE(FMA)) { + // if (X86_HAVE(AVX2)) { + // if (X86_HAVE(AVX512F)) { + // if (X86_HAVE(AVX512VL) && // + // X86_HAVE(AVX512BW) && // + // X86_HAVE(AVX512DQ) && // + // X86_HAVE(AVX512_VNNI) && // + // X86_HAVE(AVX512_BF16)) { + // // AMD Zen4+ (2023-) + // sgemm = llamafile_sgemm_amd_zen4; + // mixmul = llamafile_mixmul_amd_zen4; + // iqk_mixmul = iqk_mul_mat_moe_zen4; + // } else { + // // Intel Xeon Skylake+ (2015-) + // sgemm = llamafile_sgemm_amd_avx512f; + // mixmul = llamafile_mixmul_amd_avx512f; + // iqk_mixmul = iqk_mul_mat_moe; + // } + // } else if (X86_HAVE(AVXVNNI)) { + // // Intel Alderlake (2021-) + // sgemm = llamafile_sgemm_amd_avxvnni; + // mixmul = llamafile_mixmul_amd_avxvnni; + // iqk_mixmul = iqk_mul_mat_moe; + // } else { + // // Intel Haswell/Broadwell/Skylake (2013-2020) + // // AMD Excavator (2015-2022) + // sgemm = llamafile_sgemm_amd_avx2; + // mixmul = llamafile_mixmul_amd_avx2; + // if (X86_HAVE(F16C)) + // iqk_mixmul = iqk_mul_mat_moe; + // } + // } else { + // // AMD Piledriver (2011-2014) + // sgemm = llamafile_sgemm_amd_fma; + // mixmul = llamafile_mixmul_amd_fma; + // if (X86_HAVE(F16C)) + // iqk_mixmul = iqk_mul_mat_moe; + // } + // } else { + // // Intel Sandybridge/Ivybridge (2010-2012) + // // AMD Bulldozer (2011) + // sgemm = llamafile_sgemm_amd_avx; + // mixmul = llamafile_mixmul_amd_avx; + // } + // } else { + // // AMD K8/Barcelona (2003-2010) + // // Intel Core/Nehalem (2006-2009) + // sgemm = llamafile_sgemm_unsupported; + // mixmul = llamafile_mixmul_unsupported; + // } +#if defined(__AVX__) +#if defined(__FMA__) +#if defined(__AVX2__) +#if defined(__AVX512F__) +#if defined(__AVX512VL__) && defined(__AVX512BW__) && defined(__AVX512DQ__) && defined(__AVX512VNNI__) && defined(__AVX512BF16__) + // AMD Zen4+ (2023-) + sgemm = llamafile_sgemm_amd_zen4; + mixmul = llamafile_mixmul_amd_zen4; + iqk_mixmul = iqk_mul_mat_moe_zen4; +#else + // Intel Xeon Skylake+ (2015-) + sgemm = llamafile_sgemm_amd_avx512f; + mixmul = llamafile_mixmul_amd_avx512f; + iqk_mixmul = iqk_mul_mat_moe; +#endif +#elif defined(__AVXVNNI__) + // Intel Alderlake (2021-) + sgemm = llamafile_sgemm_amd_avxvnni; + mixmul = llamafile_mixmul_amd_avxvnni; + iqk_mixmul = iqk_mul_mat_moe; +#else + // Intel Haswell/Broadwell/Skylake (2013-2020) + // AMD Excavator (2015-2022) + sgemm = llamafile_sgemm_amd_avx2; + mixmul = llamafile_mixmul_amd_avx2; +#if 
defined(__F16C__) + iqk_mixmul = iqk_mul_mat_moe; +#endif +#endif +#else + // AMD Piledriver (2011-2014) + sgemm = llamafile_sgemm_amd_fma; + mixmul = llamafile_mixmul_amd_fma; +#if defined(__F16C__) + iqk_mixmul = iqk_mul_mat_moe; +#endif +#endif +#else + // Intel Sandybridge/Ivybridge (2010-2012) + // AMD Bulldozer (2011) + sgemm = llamafile_sgemm_amd_avx; + mixmul = llamafile_mixmul_amd_avx; +#endif +#else + // AMD K8/Barcelona (2003-2010) + // Intel Core/Nehalem (2006-2009) + sgemm = llamafile_sgemm_unsupported; + mixmul = llamafile_mixmul_unsupported; +#endif + +#elif defined(__aarch64__) + long hwcap = getauxval(AT_HWCAP); + if ((hwcap & HWCAP_FPHP) && // fp16 scalar isa (ID_AA64PFR0_EL1.FP == 1) + (hwcap & HWCAP_ASIMDHP) && // fp16 vector isa (ID_AA64PFR0_EL1.AdvSIMD == 1) + (hwcap & HWCAP_ASIMDDP)) { // dotprod isa (ID_AA64ISAR0_EL1.DP == 1) + // e.g. Apple M1, Raspberry Pi 5 + sgemm = llamafile_sgemm_arm82; + mixmul = llamafile_mixmul_arm82; + iqk_mixmul = iqk_mul_mat_moe_arm82; + } else { + // ARM64 baseline ISA + sgemm = llamafile_sgemm_arm80; + mixmul = llamafile_mixmul_arm80; + } +#else + sgemm = llamafile_sgemm_unsupported; + mixmul = llamafile_mixmul_unsupported; +#endif + } +} funcs; + +/** + * Performs optimized matrix multiplication on CPU. + * + * This subroutine may compute C = Aแต€ * B with column major ordering. + * Despite its name, this isn't a generalized implementation. Work is + * only performed when a handwritten kernel is written and available. + * Otherwise the caller should fall back to a general matmul routine. + * + * @param m is rows in `A` and `C` + * @param n is cols in `B` and `C` + * @param k is cols in `A` and rows in `B` + * @param A is first input matrix (always transposed) + * @param lda is row stride of `A` + * @param B is second input matrix (never transposed) + * @param ldb is row stride of `B` + * @param C is input/output array of output matrices + * @param ldc is row stride of `C` + * @param ith is thread id (must be less than `nth`) + * @param nth is number of threads (must be greater than zero) + * @param task is GGML task type + * @param Atype is GGML data type of `A` + * @param Btype is GGML data type of `B` + * @param Ctype is GGML data type of `C` + * @param precision may be used to control the internal compute type + * @return true if this function was able to service the matmul request + */ +bool llamafile_sgemm(long m, long n, long k, const void* A, long lda, const void* B, long ldb, void* C, long ldc, int ith, int nth, int task, int Atype, int Btype, int Ctype, int precision) { + return funcs.sgemm(m, n, k, A, lda, B, ldb, C, ldc, ith, nth, task, Atype, Btype, Ctype, + precision); +} + +/** + * Performs "mixture of experts" tensor multiplication on CPU. 
+ */
+bool llamafile_mixmul(const ggml_compute_params* params, const ggml_tensor* weights, const ggml_tensor* thought, const ggml_tensor* plan, ggml_tensor* result) {
+    return funcs.mixmul(params, weights, thought, plan, result);
+}
+
+bool llamafile_mixmul_iqk(long Nx, long Ny, long ne00, int ne11, int typeA, const void* A, const void* B, float* C, long nb1, long nb2, const void* vrow_mapping, int ith, int nth) {
+    return funcs.iqk_mixmul(Nx, Ny, ne00, ne11, typeA, A, B, C, nb1, nb2, vrow_mapping, ith, nth);
+}
diff --git a/third_party/llamafile/sgemm.h b/third_party/llamafile/sgemm.h
new file mode 100644
index 0000000..09c47d8
--- /dev/null
+++ b/third_party/llamafile/sgemm.h
@@ -0,0 +1,52 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/sgemm.h
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+#pragma once
+#include <stdbool.h>
+#include <stddef.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct ggml_tensor;
+struct ggml_compute_params;
+
+bool iqk_mul_mat(long, long, long, int, const void*, const void*, float*, long, int, int);
+bool iqk_mul_mat_zen4(long, long, long, int, const void*, const void*, float*, long, int, int);
+bool iqk_mul_mat_arm82(long, long, long, int, const void*, const void*, float*, long, int, int);
+
+bool iqk_mul_mat_moe(long, long, long, int, int, const void*, const void*, float*, long, long, const void*, int, int);
+bool iqk_mul_mat_moe_zen4(long, long, long, int, int, const void*, const void*, float*, long, long, const void*, int, int);
+bool iqk_mul_mat_moe_arm82(long, long, long, int, int, const void*, const void*, float*, long, long, const void*, int, int);
+bool iqk_mul_mat_moe_unsupported(long, long, long, int, int, const void*, const void*, float*, long, long, const void*, int, int);
+
+bool llamafile_sgemm(long, long, long, const void*, long, const void*, long, void*, long, int, int, int, int, int, int, int);
+bool llamafile_mixmul(const struct ggml_compute_params*, const struct ggml_tensor*, const struct ggml_tensor*, const struct ggml_tensor*, struct ggml_tensor*);
+size_t llamafile_mixmul_needs(const struct ggml_tensor*, const struct ggml_tensor*, const struct ggml_tensor*);
+
+bool llamafile_sgemm_unsupported(long, long, long, const void*, long, const void*, long, void*, long, int, int, int, int, int, int, int);
+bool llamafile_sgemm_amd_avx(long, long, long, const void*, long, const void*, long, void*, long, int, int, int, int, int, int, int);
+bool llamafile_sgemm_amd_fma(long, long, long, const void*, long, const void*, long, void*, long, int, int, int, int, int, int, int);
+bool llamafile_sgemm_amd_avx2(long, long, long, const void*, long, const void*, long, void*, long, int, int, int, int, int, int, int);
+bool llamafile_sgemm_amd_avxvnni(long, long, long, const void*, long, const void*, long, void*, long, int, int, int, int, int, int, int);
+bool llamafile_sgemm_amd_avx512f(long, long, long, const void*, long, const void*, long, void*, long, int, int, int, int, int, int, int);
+bool llamafile_sgemm_amd_zen4(long, long, long, const void*, long, const void*, long, void*, long, int, int, int, int, int, int, int);
+bool llamafile_sgemm_arm80(long, long, long, const void*, long, const void*, long, void*, long, int, int, int, int, int, int, int);
+bool llamafile_sgemm_arm82(long, long, long, const void*, long, const void*, long, void*, long, int, int, int, int, int, int, int);
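+
+// A hedged usage sketch for llamafile_sgemm, following the documentation in
+// sgemm.cpp above (sizes illustrative; the caller must fall back to a generic
+// matmul whenever no handwritten kernel services the request):
+//
+//   float A[64*64], B[64*64], C[64*64];        // A is read as transposed
+//   bool ok = llamafile_sgemm(64, 64, 64, A, 64, B, 64, C, 64,
+//                             /*ith=*/0, /*nth=*/1, GGML_TASK_TYPE_COMPUTE,
+//                             GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32,
+//                             GGML_PREC_DEFAULT);
+//   if (!ok) { /* use a general matmul routine instead */ }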
+
+bool llamafile_mixmul_unsupported(const struct ggml_compute_params*, const struct ggml_tensor*, const struct ggml_tensor*, const struct ggml_tensor*, struct ggml_tensor*);
+bool llamafile_mixmul_amd_avx(const struct ggml_compute_params*, const struct ggml_tensor*, const struct ggml_tensor*, const struct ggml_tensor*, struct ggml_tensor*);
+bool llamafile_mixmul_amd_fma(const struct ggml_compute_params*, const struct ggml_tensor*, const struct ggml_tensor*, const struct ggml_tensor*, struct ggml_tensor*);
+bool llamafile_mixmul_amd_avx2(const struct ggml_compute_params*, const struct ggml_tensor*, const struct ggml_tensor*, const struct ggml_tensor*, struct ggml_tensor*);
+bool llamafile_mixmul_amd_avxvnni(const struct ggml_compute_params*, const struct ggml_tensor*, const struct ggml_tensor*, const struct ggml_tensor*, struct ggml_tensor*);
+bool llamafile_mixmul_amd_avx512f(const struct ggml_compute_params*, const struct ggml_tensor*, const struct ggml_tensor*, const struct ggml_tensor*, struct ggml_tensor*);
+bool llamafile_mixmul_amd_zen4(const struct ggml_compute_params*, const struct ggml_tensor*, const struct ggml_tensor*, const struct ggml_tensor*, struct ggml_tensor*);
+bool llamafile_mixmul_arm80(const struct ggml_compute_params*, const struct ggml_tensor*, const struct ggml_tensor*, const struct ggml_tensor*, struct ggml_tensor*);
+bool llamafile_mixmul_arm82(const struct ggml_compute_params*, const struct ggml_tensor*, const struct ggml_tensor*, const struct ggml_tensor*, struct ggml_tensor*);
+bool llamafile_mixmul_iqk(long, long, long, int, int, const void*, const void*, float*, long, long, const void*, int, int);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/third_party/llamafile/tinyblas_cpu.h b/third_party/llamafile/tinyblas_cpu.h
new file mode 100644
index 0000000..f361c0c
--- /dev/null
+++ b/third_party/llamafile/tinyblas_cpu.h
@@ -0,0 +1,1054 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/tinyblas_cpu.h
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+// -*- mode:c++;indent-tabs-mode:nil;c-basic-offset:4;coding:utf-8 -*-
+// vi: set et ft=cpp ts=4 sts=4 sw=4 fenc=utf-8 :vi
+//
+// Copyright 2024 Mozilla Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
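+
+// For reference, the contract the kernels in this header implement can be
+// stated as the naive loop below (sketch only, same argument convention as
+// llamafile_sgemm; the banner comment that follows describes the tiled,
+// vectorized version actually implemented):
+//
+//   void sgemm_ref(long m, long n, long k, const float* A, long lda,
+//                  const float* B, long ldb, float* C, long ldc) {
+//       for (long j = 0; j < n; ++j)
+//           for (long i = 0; i < m; ++i) {
+//               float sum = 0;
+//               for (long l = 0; l < k; ++l)
+//                   sum += A[lda*i + l] * B[ldb*j + l];  // C = Aᵀ B
+//               C[ldc*j + i] = sum;
+//           }
+//   }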
+ +// +// +// โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•— โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— +// โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•”โ•โ•โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•”โ•โ•โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•”โ•โ•โ•โ• +// โ•šโ•โ–ˆโ–ˆโ•”โ•โ•โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ–ˆโ–„โ–ˆโ–ˆโ•‘โ–ˆโ–ˆ โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•”โ•โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— +// โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ–€โ–ˆโ–ˆโ–ˆโ•‘โ•šโ–ˆโ–ˆโ–ˆโ•”โ•โ–ˆโ–ˆโ•”โ•โ•โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•”โ•โ•โ–ˆโ–ˆโ•‘โ•”โ•โ•โ•โ–ˆโ–ˆโ•‘ +// โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•”โ•โ–ˆโ–ˆโ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•‘ +// โ•šโ•โ• โ•šโ•โ•โ•šโ•โ• โ•šโ•โ• โ•šโ•โ•โ• โ•šโ•โ•โ•โ•โ•โ• โ•šโ•โ•โ•โ•โ•šโ•โ• โ•šโ•โ•โ•šโ•โ•โ•โ•โ•โ• +// +// BASIC LINEAR ALGEBRA SUBPROGRAMS +// +// +// This file implements multithreaded CPU matrix multiplication for the +// common contiguous use case C = Aแต€ * B. These kernels are designed to +// have excellent performance[1] for matrices that fit in the CPU cache +// without imposing any overhead such as cache filling or malloc calls. +// +// This implementation does not guarantee any upper bound with rounding +// errors, which grow along with k. Our goal's to maximally exploit the +// hardware for performance, and then use whatever resources remain for +// improving numerical accuracy. +// +// [1] J. Tunney, โ€˜LLaMA Now Goes Faster on CPUsโ€™, Mar. 2024. [Online]. +// Available: https://justine.lol/matmul/. [Accessed: 29-Mar-2024]. + +#pragma once + +#include "llama.cpp/ggml-impl.h" +#include "llama.cpp/ggml-quants.h" +// #include "log.h" +#include "flags.h" +#include "sgemm.h" +// #include + +#pragma GCC diagnostic ignored "-Wpedantic" +#pragma GCC diagnostic ignored "-Wignored-attributes" + +#define ROW_ALIGN 64 +#define MATRIX_ALIGN 4096 +#define MAX_ALIGN 4096 + +#ifdef _MSC_VER +#define NOINLINE __declspec(noinline) +#else +#define NOINLINE __attribute__((__noinline__)) +#endif + +#if defined(__ARM_NEON) || defined(__AVX512F__) +#define VECTOR_REGISTERS 32 +#else +#define VECTOR_REGISTERS 16 +#endif + +#if 0 +#define NOT_SUPPORTED tinyBLAS_not_supported(__FILE__, __LINE__) +#else +#define NOT_SUPPORTED false +#endif +#define WANT_QUANTIZATION false + +namespace { + +bool tinyBLAS_not_supported(const char* file, int line) { + // tinylogf("%s:%d: tinyBLAS not supported\n", file, line); + return false; +} + +inline float unhalf(ggml_fp16_t d) { + return GGML_FP16_TO_FP32(d); +} +inline float unhalf(ggml_bf16_t d) { + return GGML_BF16_TO_FP32(d); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// +// MATRIX MEMORY INDEXING + +#define NCA 1 +#define NCB 2 +#define NCC 4 + +#define INDEX(A, lda, j, i) (CONFIG & NC##A ? 
((T##A**)A)[j] + i : A + lda * (j) + i)
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// GGML TYPE TRAITS
+
+template <typename T>
+struct ggml_type_trait;
+template <>
+struct ggml_type_trait<float> {
+    static constexpr ggml_type id = GGML_TYPE_F32;
+};
+template <>
+struct ggml_type_trait<ggml_bf16_t> {
+    static constexpr ggml_type id = GGML_TYPE_BF16;
+};
+template <>
+struct ggml_type_trait<ggml_fp16_t> {
+    static constexpr ggml_type id = GGML_TYPE_F16;
+};
+template <>
+struct ggml_type_trait<block_q8_0> {
+    static constexpr ggml_type id = GGML_TYPE_Q8_0;
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// VECTORIZED ARITHMETIC OPERATIONS
+
+#if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
+inline __m128 add(__m128 x, __m128 y) {
+    return _mm_add_ps(x, y);
+}
+inline __m128 sub(__m128 x, __m128 y) {
+    return _mm_sub_ps(x, y);
+}
+inline __m128 mul(__m128 x, __m128 y) {
+    return _mm_mul_ps(x, y);
+}
+#endif // __SSE__
+
+#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
+inline __m256 add(__m256 x, __m256 y) {
+    return _mm256_add_ps(x, y);
+}
+inline __m256 sub(__m256 x, __m256 y) {
+    return _mm256_sub_ps(x, y);
+}
+inline __m256 mul(__m256 x, __m256 y) {
+    return _mm256_mul_ps(x, y);
+}
+#endif // __AVX__
+
+#if defined(__AVX512F__)
+inline __m512 add(__m512 x, __m512 y) {
+    return _mm512_add_ps(x, y);
+}
+inline __m512 sub(__m512 x, __m512 y) {
+    return _mm512_sub_ps(x, y);
+}
+inline __m512 mul(__m512 x, __m512 y) {
+    return _mm512_mul_ps(x, y);
+}
+#endif // __AVX512F__
+
+#if defined(__ARM_NEON)
+inline float32x4_t add(float32x4_t x, float32x4_t y) {
+    return vaddq_f32(x, y);
+}
+inline float32x4_t sub(float32x4_t x, float32x4_t y) {
+    return vsubq_f32(x, y);
+}
+inline float32x4_t mul(float32x4_t x, float32x4_t y) {
+    return vmulq_f32(x, y);
+}
+#endif // __ARM_NEON
+
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+inline float16x8_t add(float16x8_t x, float16x8_t y) {
+    return vaddq_f16(x, y);
+}
+inline float16x8_t sub(float16x8_t x, float16x8_t y) {
+    return vsubq_f16(x, y);
+}
+inline float16x8_t mul(float16x8_t x, float16x8_t y) {
+    return vmulq_f16(x, y);
+}
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// VECTORIZED FUSED MULTIPLY ADD
+
+/**
+ * Computes a * b + c.
+ */
+template <typename T, typename U>
+inline U madd(T a, T b, U c) {
+    return add(mul(a, b), c);
+}
+
+/**
+ * Computes a * b + c with error correction.
+ *
+ * @see W. Kahan, "Further remarks on reducing truncation errors,"
+ *      Communications of the ACM, vol. 8, no. 1, p. 40, Jan. 1965,
+ *      doi: 10.1145/363707.363723.
+ */
+template <typename T, typename U>
+inline U madder(T a, T b, U c, U* e) {
+    U y = sub(mul(a, b), *e);
+    U t = add(c, y);
+    *e = sub(sub(t, c), y);
+    return t;
+}
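+
+// A scalar sketch of the compensation above (illustrative; madder applies the
+// same Kahan-style update per SIMD lane): e carries the rounding error of the
+// previous addition, so long dot products stop drifting as k grows.
+//
+//   float acc = 0, err = 0;
+//   for (long l = 0; l < k; ++l) {
+//       float y = a[l]*b[l] - err;   // re-inject what was lost last time
+//       float t = acc + y;
+//       err = (t - acc) - y;         // what this addition actually lost
+//       acc = t;
+//   }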
+
+#ifdef __ARM_NEON
+inline float32x4_t badder(float32x4_t a, float b, float32x4_t c, float32x4_t* e) {
+    float32x4_t y = sub(vmulq_n_f32(a, b), *e);
+    float32x4_t t = add(c, y);
+    *e = sub(sub(t, c), y);
+    return t;
+}
+#endif
+
+#if defined(__FMA__)
+#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
+template <>
+inline __m256 madd(__m256 a, __m256 b, __m256 c) {
+    return _mm256_fmadd_ps(a, b, c);
+}
+#endif
+#if defined(__AVX512F__)
+template <>
+inline __m512 madd(__m512 a, __m512 b, __m512 c) {
+    return _mm512_fmadd_ps(a, b, c);
+}
+#endif
+#endif
+
+#if defined(__ARM_FEATURE_FMA)
+template <>
+inline float32x4_t madd(float32x4_t a, float32x4_t b, float32x4_t c) {
+    return vfmaq_f32(c, a, b);
+}
+#if 0 // todo: this specialization chops gcc 12.3 performance in half
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(_MSC_VER) && 0
+template <>
+inline float16x8_t madd(float16x8_t a, float16x8_t b, float16x8_t c) {
+    return vfmaq_f16(c, b, a);
+}
+#endif
+#endif
+#endif
+
+#if defined(__AVX512BF16__)
+template <>
+inline __m512 madd(__m512bh x, __m512bh y, __m512 z) {
+    return _mm512_dpbf16_ps(z, x, y);
+}
+template <>
+inline __m512 madder(__m512bh x, __m512bh y, __m512 z, __m512* _) {
+    return _mm512_dpbf16_ps(z, x, y);
+}
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// VECTORIZED HORIZONTAL SUM
+
+#if defined(__ARM_NEON)
+inline float hsum(float32x4_t x) {
+    return vaddvq_f32(x);
+}
+#endif // __ARM_NEON
+
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(_MSC_VER)
+inline float hsum(float16x8_t x) {
+    // todo: this works great on clang but it produces terrible code on gcc 12.3
+    return vaddvq_f32(vaddq_f32(vcvt_f32_f16(vget_low_f16(x)), vcvt_f32_f16(vget_high_f16(x))));
+}
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
+inline float hsum(__m128 x) {
+#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
+    x = _mm_add_ps(x, _mm_movehl_ps(x, x));
+    x = _mm_add_ss(x, _mm_movehdup_ps(x));
+#else
+    __m128 t;
+    t = _mm_shuffle_ps(x, x, _MM_SHUFFLE(2, 3, 0, 1));
+    x = _mm_add_ps(x, t);
+    t = _mm_movehl_ps(t, x);
+    x = _mm_add_ss(x, t);
+#endif
+    return _mm_cvtss_f32(x);
+}
+#endif
+
+#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
+inline float hsum(__m256 x) {
+    return hsum(_mm_add_ps(_mm256_extractf128_ps(x, 1), _mm256_castps256_ps128(x)));
+}
+#endif // __AVX__
+
+#if defined(__AVX512F__)
+inline float hsum(__m512 x) {
+    return _mm512_reduce_add_ps(x);
+}
+#endif // __AVX512F__
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// VECTORIZED MEMORY LOADING
+
+template <typename T, typename U>
+T load(const U*);
+
+template <>
+inline float load(const float* p) {
+    return *p;
+}
+template <>
+inline float load(const ggml_fp16_t* p) {
+    return unhalf(*p);
+}
+template <>
+inline float load(const ggml_bf16_t* p) {
+    return unhalf(*p);
+}
+
+#if defined(__ARM_NEON)
+template <>
+inline float32x4_t load(const float* p) {
+    return vld1q_f32(p);
+}
+template <>
+inline float32x4_t load(const ggml_bf16_t* p) {
+    return vreinterpretq_f32_u32(vshll_n_u16(vld1_u16((const unsigned short*)p), 16));
+}
+#if !defined(_MSC_VER)
+template <>
+inline float16x8_t load(const ggml_fp16_t* p) {
+    return
vld1q_f16((const float16_t*)p); +} +template <> +inline float32x4_t load(const ggml_fp16_t* p) { + return vcvt_f32_f16(vld1_f16((const float16_t*)p)); +} +#endif // _MSC_VER +#endif // __ARM_NEON + +#if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) +template <> +inline __m128 load(const float* p) { + return _mm_loadu_ps(p); +} +#endif // __SSE__ + +#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) +template <> +inline __m256 load(const float* p) { + return _mm256_loadu_ps(p); +} +#endif // __AVX__ + +#if defined(__AVX2__) || defined(__AVX512F__) +template <> +inline __m256 load(const ggml_bf16_t* p) { + return _mm256_castsi256_ps( + _mm256_slli_epi32(_mm256_cvtepu16_epi32(_mm_loadu_si128((const __m128i*)p)), 16)); +} +#endif // __AVX2__ + +#if defined(__F16C__) +template <> +inline __m256 load(const ggml_fp16_t* p) { + return _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)p)); +} +#endif // __F16C__ + +#if defined(__AVX512F__) +template <> +inline __m512 load(const float* p) { + return _mm512_loadu_ps(p); +} +template <> +inline __m512 load(const ggml_fp16_t* p) { + return _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i*)p)); +} +template <> +inline __m512 load(const ggml_bf16_t* p) { + return _mm512_castsi512_ps( + _mm512_slli_epi32(_mm512_cvtepu16_epi32(_mm256_loadu_si256((const __m256i*)p)), 16)); +} +#endif // __AVX512F__ + +#if defined(__AVX512BF16__) +template <> +inline __m512bh load(const ggml_bf16_t* p) { + return (__m512bh)_mm512_loadu_ps((const float*)p); +} +template <> +inline __m512bh load(const float* p) { + return _mm512_cvtne2ps_pbh(_mm512_loadu_ps(p + 16), _mm512_loadu_ps(p)); +} +#endif // __AVX512BF16__ + +//////////////////////////////////////////////////////////////////////////////////////////////////// +// FLOATING POINT OUTPUT STREAMING + +inline void store(float* p, float f) { + *p = f; +} + +inline void store(ggml_fp16_t* p, float f) { + *p = GGML_FP32_TO_FP16(f); +} + +inline void store(ggml_bf16_t* p, float f) { + *p = GGML_FP32_TO_BF16(f); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// +// FLOATING POINT MATRIX MULTIPLICATION + +template +class tinyBLAS { + public: + tinyBLAS(long k, const TA* A, long lda, const TB* B, long ldb, TC* C, long ldc, int ith, int nth) + : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) { + } + + void matmul(long m, long n, int task) { + if (task == GGML_TASK_TYPE_COMPUTE) + mnpack(0, m, 0, n); + } + + private: + NOINLINE void mnpack(long m0, long m, long n0, long n) { + long mc, nc, mp, np; + +#if VECTOR_REGISTERS == 32 + if (!FLAG_precise) { + switch ((MIN(m - m0, 5) << 4) | MIN(n - n0, 5)) { + case 0x55: + mc = 5; + nc = 5; + gemm<5, 5, false>(m0, m, n0, n); + break; + case 0x54: + case 0x53: + case 0x52: + case 0x45: + case 0x44: + case 0x43: + case 0x42: + case 0x35: + case 0x34: + case 0x33: + case 0x32: + case 0x25: + case 0x24: + case 0x23: + case 0x22: + mc = 2; + nc = 2; + gemm<2, 2, false>(m0, m, n0, n); + break; + case 0x51: + case 0x41: + case 0x31: + case 0x21: + mc = 2; + nc = 1; + gemm<2, 1, false>(m0, m, n0, n); + break; + case 0x15: + case 0x14: + case 0x13: + case 0x12: + mc = 1; + nc = 2; + gemm<1, 2, false>(m0, m, n0, n); + break; + case 0x11: + mc = 1; + nc = 1; + gemm<1, 1, false>(m0, m, n0, n); + break; + default: + return; + } + } else { + switch ((MIN(m - m0, 4) << 4) | MIN(n - n0, 3)) { + case 0x43: + mc = 4; + nc = 3; + gemm<4, 3, true>(m0, m, n0, n); + break; + case 
0x42: + case 0x33: + case 0x32: + case 0x23: + case 0x22: + mc = 2; + nc = 2; + gemm<2, 2, true>(m0, m, n0, n); + break; + case 0x41: + case 0x31: + case 0x21: + mc = 2; + nc = 1; + gemm<2, 1, true>(m0, m, n0, n); + break; + case 0x13: + case 0x12: + mc = 1; + nc = 2; + gemm<1, 2, true>(m0, m, n0, n); + break; + case 0x11: + mc = 1; + nc = 1; + gemm<1, 1, true>(m0, m, n0, n); + break; + default: + return; + } + } +#endif + +#if VECTOR_REGISTERS == 16 + if (!FLAG_precise) { + switch ((MIN(m - m0, 4) << 4) | MIN(n - n0, 3)) { + case 0x43: + mc = 4; + nc = 3; + gemm<4, 3, false>(m0, m, n0, n); + break; + case 0x42: + case 0x33: + case 0x32: + case 0x23: + case 0x22: + mc = 2; + nc = 2; + gemm<2, 2, false>(m0, m, n0, n); + break; + case 0x41: + case 0x31: + case 0x21: + mc = 2; + nc = 1; + gemm<2, 1, false>(m0, m, n0, n); + break; + case 0x13: + case 0x12: + mc = 1; + nc = 2; + gemm<1, 2, false>(m0, m, n0, n); + break; + case 0x11: + mc = 1; + nc = 1; + gemm<1, 1, false>(m0, m, n0, n); + break; + default: + return; + } + } else { + switch ((MIN(m - m0, 3) << 4) | MIN(n - n0, 2)) { + case 0x32: + mc = 3; + nc = 2; + gemm<3, 2, true>(m0, m, n0, n); + break; + case 0x23: + mc = 2; + nc = 3; + gemm<2, 3, true>(m0, m, n0, n); + break; + case 0x22: + mc = 2; + nc = 2; + gemm<2, 2, true>(m0, m, n0, n); + break; + case 0x31: + case 0x21: + mc = 2; + nc = 1; + gemm<2, 1, true>(m0, m, n0, n); + break; + case 0x12: + mc = 1; + nc = 2; + gemm<1, 2, true>(m0, m, n0, n); + break; + case 0x11: + mc = 1; + nc = 1; + gemm<1, 1, true>(m0, m, n0, n); + break; + default: + return; + } + } +#endif + + mp = m0 + (m - m0) / mc * mc; + np = n0 + (n - n0) / nc * nc; + mnpack(mp, m, n0, np); + mnpack(m0, m, np, n); + } + + template + NOINLINE void gemm(long m0, long m, long n0, long n) { + long ytiles = RM > 1 ? (m - m0) / RM : 1; + long xtiles = RN > 1 ? 
(n - n0) / RN : 1; + long tiles = xtiles * ytiles; + long duty = (tiles + nth - 1) / nth; + long start = duty * ith; + long end = start + duty; + if (end > tiles) + end = tiles; + for (long job = start; job < end; ++job) { + long ii = m0 + job / xtiles * RM; + long jj = n0 + job % xtiles * RN; + D Cv[RN][RM] = {}; + D Ce[RN][RM] = {}; + for (long l = 0; l < k; l += KN) +#pragma GCC unroll 100 + for (int j = 0; j < RN; ++j) +#pragma GCC unroll 100 + for (int i = 0; i < RM; ++i) + if (PRECISE) + Cv[j][i] = madder(load(INDEX(A, lda, ii + i, l)), // + load(INDEX(B, ldb, jj + j, l)), // + Cv[j][i], &Ce[j][i]); + else + Cv[j][i] = madd(load(INDEX(A, lda, ii + i, l)), // + load(INDEX(B, ldb, jj + j, l)), // + Cv[j][i]); +#pragma GCC unroll 100 + for (int j = 0; j < RN; ++j) +#pragma GCC unroll 100 + for (int i = 0; i < RM; ++i) + store(INDEX(C, ldc, jj + j, ii + i), hsum(Cv[j][i])); + } + } + + const TA* const A; + const TB* const B; + TC* const C; + const long k; + const long lda; + const long ldb; + const long ldc; + const int ith; + const int nth; +}; + +////////////////////////////////////////////////////////////////////////////////////////// +// QUANT ZERO MATRIX MULTIPLICATION + +#if defined(__ARM_FEATURE_DOTPROD) +template +class tinyBLAS_Q0_ARM { + public: + tinyBLAS_Q0_ARM(long k, const TA* A, long lda, const TB* B, long ldb, TC* C, long ldc, int ith, int nth) + : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) { + } + + void matmul(long m, long n, int task) { + if (task == GGML_TASK_TYPE_COMPUTE) + mnpack(0, m, 0, n); + } + + private: + NOINLINE void mnpack(long m0, long m, long n0, long n) { + long mc, nc, mp, np; + + if (!FLAG_precise) { + switch ((MIN(m - m0, 3) << 4) | MIN(n - n0, 3)) { + case 0x33: + mc = 3; + nc = 3; + gemm<3, 3, false>(m0, m, n0, n); + break; + case 0x32: + case 0x23: + case 0x22: + mc = 2; + nc = 2; + gemm<2, 2, false>(m0, m, n0, n); + break; + case 0x31: + case 0x21: + mc = 2; + nc = 1; + gemm<2, 1, false>(m0, m, n0, n); + break; + case 0x13: + case 0x12: + mc = 1; + nc = 2; + gemm<1, 2, false>(m0, m, n0, n); + break; + case 0x11: + mc = 1; + nc = 1; + gemm<1, 1, false>(m0, m, n0, n); + break; + default: + return; + } + } else { + switch ((MIN(m - m0, 3) << 4) | MIN(n - n0, 3)) { + case 0x33: + mc = 3; + nc = 3; + gemm<3, 3, true>(m0, m, n0, n); + break; + case 0x32: + case 0x23: + case 0x22: + mc = 2; + nc = 2; + gemm<2, 2, true>(m0, m, n0, n); + break; + case 0x31: + case 0x21: + mc = 2; + nc = 1; + gemm<2, 1, true>(m0, m, n0, n); + break; + case 0x13: + case 0x12: + mc = 1; + nc = 2; + gemm<1, 2, true>(m0, m, n0, n); + break; + case 0x11: + mc = 1; + nc = 1; + gemm<1, 1, true>(m0, m, n0, n); + break; + default: + return; + } + } + + mp = m0 + (m - m0) / mc * mc; + np = n0 + (n - n0) / nc * nc; + mnpack(mp, m, n0, np); + mnpack(m0, m, np, n); + } + + template + NOINLINE void gemm(long m0, long m, long n0, long n) { + long ytiles = RM > 1 ? (m - m0) / RM : 1; + long xtiles = RN > 1 ? 
(n - n0) / RN : 1; + long tiles = xtiles * ytiles; + long duty = (tiles + nth - 1) / nth; + long start = duty * ith; + long end = start + duty; + if (end > tiles) + end = tiles; + for (long job = start; job < end; ++job) { + long ii = m0 + job / xtiles * RM; + long jj = n0 + job % xtiles * RN; + float32x4_t Cv[RN][RM] = {}; + float32x4_t Ce[RN][RM] = {}; + for (int l = 0; l < k; ++l) +#pragma GCC unroll 100 + for (int j = 0; j < RN; ++j) +#pragma GCC unroll 100 + for (int i = 0; i < RM; ++i) { + float32x4_t a = vcvtq_f32_s32(vdotq_s32( + vdotq_s32(vdupq_n_s32(0), load_lo(INDEX(A, lda, ii + i, l)), + load_lo(INDEX(B, ldb, jj + j, l))), + load_hi(INDEX(A, lda, ii + i, l)), load_hi(INDEX(B, ldb, jj + j, l)))); + float b = unhalf(INDEX(A, lda, ii + i, l)->d) * + unhalf(INDEX(B, ldb, jj + j, l)->d); + if (PRECISE) + Cv[j][i] = badder(a, b, Cv[j][i], &Ce[j][i]); + else + Cv[j][i] = vmlaq_n_f32(Cv[j][i], a, b); + } +#pragma GCC unroll 100 + for (int j = 0; j < RN; ++j) +#pragma GCC unroll 100 + for (int i = 0; i < RM; ++i) + store(INDEX(C, ldc, jj + j, ii + i), hsum(Cv[j][i])); + } + } + + inline int8x16_t load_lo(const block_q8_0* b) { + return vld1q_s8(b->qs); + } + + inline int8x16_t load_hi(const block_q8_0* b) { + return vld1q_s8(b->qs + 16); + } + + inline int8x16_t load_lo(const block_q4_0* b) { + return vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vld1q_u8(b->qs), vdupq_n_u8(0x0f))), + vdupq_n_s8(0x8)); + } + + inline int8x16_t load_hi(const block_q4_0* b) { + return vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(vld1q_u8(b->qs), 4)), vdupq_n_s8(0x8)); + } + + const TA* const A; + const TB* const B; + TC* const C; + const long k; + const long lda; + const long ldb; + const long ldc; + const int ith; + const int nth; +}; +#endif // __ARM_FEATURE_DOTPROD + +#if defined(__AVX2__) || defined(__AVX512F__) +template +class tinyBLAS_Q0_AVX2 { + public: + tinyBLAS_Q0_AVX2(long k, const TA* A, long lda, const TB* B, long ldb, TC* C, long ldc, int ith, int nth) + : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) { + } + + void matmul(long m, long n, int task) { + if (task == GGML_TASK_TYPE_COMPUTE) + mnpack(0, m, 0, n); + } + + private: + void mnpack(long m0, long m, long n0, long n) { + long mc, nc, mp, np; + +#if VECTOR_REGISTERS == 32 + if (!FLAG_precise) { + switch ((MIN(m - m0, 3) << 4) | MIN(n - n0, 3)) { + case 0x33: + mc = 3; + nc = 3; + gemm<3, 3, false>(m0, m, n0, n); + break; + case 0x32: + case 0x23: + case 0x22: + mc = 2; + nc = 2; + gemm<2, 2, false>(m0, m, n0, n); + break; + case 0x31: + case 0x21: + mc = 2; + nc = 1; + gemm<2, 1, true>(m0, m, n0, n); + break; + case 0x13: + case 0x12: + mc = 1; + nc = 2; + gemm<1, 2, true>(m0, m, n0, n); + break; + case 0x11: + mc = 1; + nc = 1; + gemm<1, 1, true>(m0, m, n0, n); + break; + default: + return; + } + } else { + switch ((MIN(m - m0, 3) << 4) | MIN(n - n0, 3)) { + case 0x33: + mc = 3; + nc = 3; + gemm<3, 3, true>(m0, m, n0, n); + break; + case 0x32: + case 0x23: + case 0x22: + mc = 2; + nc = 2; + gemm<2, 2, true>(m0, m, n0, n); + break; + case 0x31: + case 0x21: + mc = 2; + nc = 1; + gemm<2, 1, true>(m0, m, n0, n); + break; + case 0x13: + case 0x12: + mc = 1; + nc = 2; + gemm<1, 2, true>(m0, m, n0, n); + break; + case 0x11: + mc = 1; + nc = 1; + gemm<1, 1, true>(m0, m, n0, n); + break; + default: + return; + } + } +#endif + +#if VECTOR_REGISTERS == 16 + if (!FLAG_precise) { + switch ((MIN(m - m0, 3) << 4) | MIN(n - n0, 2)) { + case 0x32: + mc = 3; + nc = 2; + gemm<3, 2, false>(m0, m, n0, n); + break; + case 0x23: + mc = 2; + 
nc = 3; + gemm<2, 3, false>(m0, m, n0, n); + break; + case 0x22: + mc = 2; + nc = 2; + gemm<2, 2, false>(m0, m, n0, n); + break; + case 0x31: + case 0x21: + mc = 2; + nc = 1; + gemm<2, 1, false>(m0, m, n0, n); + break; + case 0x12: + mc = 1; + nc = 2; + gemm<1, 2, false>(m0, m, n0, n); + break; + case 0x11: + mc = 1; + nc = 1; + gemm<1, 1, false>(m0, m, n0, n); + break; + default: + return; + } + } else { + switch ((MIN(m - m0, 2) << 4) | MIN(n - n0, 1)) { + case 0x21: + mc = 2; + nc = 1; + gemm<2, 1, true>(m0, m, n0, n); + break; + case 0x12: + mc = 1; + nc = 2; + gemm<1, 2, true>(m0, m, n0, n); + break; + case 0x11: + mc = 1; + nc = 1; + gemm<1, 1, true>(m0, m, n0, n); + break; + default: + return; + } + } +#endif + + mp = m0 + (m - m0) / mc * mc; + np = n0 + (n - n0) / nc * nc; + mnpack(mp, m, n0, np); + mnpack(m0, m, np, n); + } + + template + NOINLINE void gemm(long m0, long m, long n0, long n) { + long ytiles = RM > 1 ? (m - m0) / RM : 1; + long xtiles = RN > 1 ? (n - n0) / RN : 1; + long tiles = xtiles * ytiles; + long duty = (tiles + nth - 1) / nth; + long start = duty * ith; + long end = start + duty; + if (end > tiles) + end = tiles; + for (long job = start; job < end; ++job) { + long ii = m0 + job / xtiles * RM; + long jj = n0 + job % xtiles * RN; + __m256 Cv[RN][RM] = {}; + __m256 Ce[RN][RM] = {}; + for (long l = 0; l < k; ++l) +#pragma GCC unroll 100 + for (int j = 0; j < RN; ++j) +#pragma GCC unroll 100 + for (int i = 0; i < RM; ++i) { + __m256 a = _mm256_set1_ps(unhalf(INDEX(A, lda, ii + i, l)->d) * + unhalf(INDEX(B, ldb, jj + j, l)->d)); + __m256 b = updot(_mm256_sign_epi8(load(INDEX(A, lda, ii + i, l)), + load(INDEX(A, lda, ii + i, l))), + _mm256_sign_epi8(load(INDEX(B, ldb, jj + j, l)), + load(INDEX(A, lda, ii + i, l)))); + if (PRECISE) + Cv[j][i] = madder(a, b, Cv[j][i], &Ce[j][i]); + else + Cv[j][i] = madd(a, b, Cv[j][i]); + } +#pragma GCC unroll 100 + for (int j = 0; j < RN; ++j) +#pragma GCC unroll 100 + for (int i = 0; i < RM; ++i) + store(INDEX(C, ldc, jj + j, ii + i), hsum(Cv[j][i])); + } + } + + inline __m256i load(const block_q8_0* b) { + return _mm256_loadu_si256((const __m256i*)b->qs); + } + + inline __m256i load(const block_q4_0* b) { + __m128i x = _mm_loadu_si128((const __m128i*)b->qs); + return _mm256_sub_epi8(_mm256_and_si256(_mm256_set1_epi8(15), + _mm256_insertf128_si256(_mm256_castsi128_si256(x), + _mm_srli_epi16(x, 4), 1)), + _mm256_set1_epi8(8)); + } + + inline __m256 updot(__m256i u, __m256i s) { + __m256i res; +#if defined(__AVXVNNI__) || (defined(__AVX512VNNI__) && defined(__AVX512VL__)) + res = _mm256_dpbusd_epi32(_mm256_setzero_si256(), u, s); +#else + res = _mm256_madd_epi16(_mm256_set1_epi16(1), _mm256_maddubs_epi16(u, s)); +#endif + return _mm256_cvtepi32_ps(res); + } + + const TA* const A; + const TB* const B; + TC* const C; + const long k; + const long lda; + const long ldb; + const long ldc; + const int ith; + const int nth; +}; +#endif // __AVX2__ + +} // namespace diff --git a/third_party/llamafile/tinyblas_cpu_mixmul.inc b/third_party/llamafile/tinyblas_cpu_mixmul.inc new file mode 100644 index 0000000..7315639 --- /dev/null +++ b/third_party/llamafile/tinyblas_cpu_mixmul.inc @@ -0,0 +1,411 @@ +// Adapted from +// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/tinyblas_cpu_mixmul.inc +// Copyrigth 2024 Mozilla Foundation. +// Copyright(c) 2024 by KVCache.AI, All Rights Reserved. 
+ +// -*- mode:c++;indent-tabs-mode:nil;c-basic-offset:4;coding:utf-8 -*- +// vi: set et ft=cpp ts=4 sts=4 sw=4 fenc=utf-8 :vi +// +// Copyright 2024 Mozilla Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "tinyblas_cpu.h" + +// +// +// โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•— โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— +// โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•”โ•โ•โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•”โ•โ•โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•”โ•โ•โ•โ• +// โ•šโ•โ–ˆโ–ˆโ•”โ•โ•โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ–ˆโ–„โ–ˆโ–ˆโ•‘โ–ˆโ–ˆ โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•”โ•โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— +// โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ–€โ–ˆโ–ˆโ–ˆโ•‘โ•šโ–ˆโ–ˆโ–ˆโ•”โ•โ–ˆโ–ˆโ•”โ•โ•โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•”โ•โ•โ–ˆโ–ˆโ•‘โ•”โ•โ•โ•โ–ˆโ–ˆโ•‘ +// โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•”โ•โ–ˆโ–ˆโ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•‘ +// โ•šโ•โ• โ•šโ•โ•โ•šโ•โ• โ•šโ•โ• โ•šโ•โ•โ• โ•šโ•โ•โ•โ•โ•โ• โ•šโ•โ•โ•โ•โ•šโ•โ• โ•šโ•โ•โ•šโ•โ•โ•โ•โ•โ• +// +// MIXTURE OF EXPERTS TENSOR MULTIPLICATION +// +// +// SHAPES +// +// - weights [cols, rows, experts] +// - thought [cols, tasks, tokens] w/ tasks โ‰ค thinkers +// - result [rows, thinkers, tokens] w/ thinkers โ‰ค experts +// - plan [thinkers, tokens] w/ i32 < experts +// +// DEFINITION +// +// for thinker in range(thinkers): +// for token in range(tokens): +// for row in range(rows): +// c = 0 +// for col in range(cols): +// expert = plan[token][thinker] +// a = weights[expert][row][col] +// b = thought[token][thinker % tasks][col] +// c += a * b +// result[token][thinker][row] = c +// +// REGULARITIES +// +// - tokens can be odd +// - thinkers is usually 2 +// - tasks is usually 1 or 2 +// - cols should be a multiple of 64 +// - rows should be a multiple of 64 +// - experts is usually 8 but could be 60 +// - tokens is always 1 for token generation +// - tokens can be huge for prompt processing +// +// EXAMPLE +// +// mixtral 8x7b w/ 217 token prompt +// +// | ne*0 ne*1 ne*2 ne*3 | nb*0 nb*1 nb*2 nb*3 | type +// ========================================================================= +// weights | 16384 6144 8 1 | 18 0x2400 0x3600000 0x1b000000 | q4_0 +// thought | 16384 2 217 1 | 4 0x10000 0x20000 0x1b20000 | f32 +// result | 6144 2 217 1 | 4 0x6000 0xc000 0xa2c000 | f32 +// plan | 2 217 1 1 | 4 0x20 0x1b20 0x1b20 | i32 +// + +namespace { + +class MixMul { + public: + MixMul(const ggml_compute_params* params, const ggml_tensor* weights, const ggml_tensor* thought, const ggml_tensor* plan, ggml_tensor* result) + : params(params), + weights(weights), + thought(thought), + plan(plan), + result(result), + rows(weights->ne[1]), + cols(weights->ne[0]), + experts(weights->ne[2]), + thinkers(plan->ne[0]), + tasks(thought->ne[1]), + tokens(thought->ne[2]), + ldq((cols * 2 + ROW_ALIGN - 1) & -ROW_ALIGN), + wdata_((char*)(((uintptr_t)params->wdata + MAX_ALIGN - 1) & -MAX_ALIGN)), + allocated_(0) { + } + + bool allocate_shared_memory() { + if 
(!(quantized_thought_ = allocate<char>(MATRIX_ALIGN, tokens * tasks * ldq)))
+            return false;
+        if (!(rowptr_result_ = allocate<uintptr_t>(ROW_ALIGN, experts * tokens * thinkers)))
+            return false;
+        if (!(rowptr_thought_ = allocate<uintptr_t>(ROW_ALIGN, experts * tokens * thinkers)))
+            return false;
+        if (!(rowptr_count_ = allocate<long>(sizeof(long), experts)))
+            return false;
+        return true;
+    }
+
+    size_t get_allocated_bytes() {
+        return (wdata_ - (char*)params->wdata) + allocated_;
+    }
+
+    bool mixmul() {
+        // invariants
+        assert(tasks <= thinkers);
+        assert(thinkers <= experts);
+        assert(tokens == plan->ne[1]);
+        assert(rows == result->ne[0]);
+        assert(cols == thought->ne[0]);
+        assert(tokens == result->ne[2]);
+        assert(thinkers == result->ne[1]);
+
+        // dimensionality
+        assert(plan->ne[2] == 1);
+        assert(plan->ne[3] == 1);
+        assert(result->ne[3] == 1);
+        assert(weights->ne[3] == 1);
+        assert(thought->ne[3] == 1);
+
+        // miscellaneous
+        assert(params->nth > 0);
+        assert(params->ith < params->nth);
+        assert(plan->type == GGML_TYPE_I32);
+
+        // check nb01 is convertible to lda
+        if (weights->nb[1] % ggml_type_size(weights->type))
+            return false;
+
+        // no support for column strides
+        if (result->nb[0] != ggml_type_size(result->type))
+            return false;
+        if (thought->nb[0] != ggml_type_size(thought->type))
+            return false;
+        if (weights->nb[0] != ggml_type_size(weights->type))
+            return false;
+
+        // supported output types
+        switch (result->type) {
+        case GGML_TYPE_F32:
+            return mixmuler<float>();
+        default:
+            return false;
+        }
+    }
+
+  private:
+    template <typename TC>
+    bool mixmuler() {
+        switch (weights->type) {
+        case GGML_TYPE_F32:
+            if (thought->type != GGML_TYPE_F32)
+                return false;
+#if defined(__AVX512F__)
+            return mixmat<16, 1, tinyBLAS<0, 16, __m512, __m512, float, float, TC>, float,
+                          float, TC>();
+#elif defined(__AVX__) || defined(__AVX2__)
+            return mixmat<8, 1, tinyBLAS<0, 8, __m256, __m256, float, float, TC>, float,
+                          float, TC>();
+#elif defined(__SSE__)
+            return mixmat<4, 1, tinyBLAS<0, 4, __m128, __m128, float, float, TC>, float,
+                          float, TC>();
+#elif defined(__ARM_NEON)
+            return mixmat<4, 1, tinyBLAS<0, 4, float32x4_t, float32x4_t, float, float, TC>,
+                          float, float, TC>();
+#else
+            return false;
+#endif
+
+        case GGML_TYPE_BF16:
+            if (thought->type != GGML_TYPE_F32 && thought->type != GGML_TYPE_BF16)
+                return false;
+#if defined(__AVX512BF16__)
+            if (!FLAG_precise) {
+                return mixmat<
+                    32, 1, tinyBLAS<0, 32, __m512, __m512bh, ggml_bf16_t, ggml_bf16_t, TC>,
+                    ggml_bf16_t, ggml_bf16_t, TC>();
+            } else {
+                return mixmat<16, 1,
+                              tinyBLAS<0, 16, __m512, __m512, ggml_bf16_t, ggml_bf16_t, TC>,
+                              ggml_bf16_t, ggml_bf16_t, TC>();
+            }
+#elif defined(__AVX512F__)
+            return mixmat<16, 1,
+                          tinyBLAS<0, 16, __m512, __m512, ggml_bf16_t, ggml_bf16_t, TC>,
+                          ggml_bf16_t, ggml_bf16_t, TC>();
+#elif defined(__AVX2__)
+            return mixmat<8, 1,
+                          tinyBLAS<0, 8, __m256, __m256, ggml_bf16_t, ggml_bf16_t, TC>,
+                          ggml_bf16_t, ggml_bf16_t, TC>();
+#elif defined(__ARM_NEON) && !defined(_MSC_VER)
+            return mixmat<
+                4, 1,
+                tinyBLAS<0, 4, float32x4_t, float32x4_t, ggml_bf16_t, ggml_bf16_t, TC>,
+                ggml_bf16_t, ggml_bf16_t, TC>();
+#else
+            return false;
+#endif
+
+        case GGML_TYPE_F16:
+            if (thought->type != GGML_TYPE_F32 && thought->type != GGML_TYPE_F16)
+                return false;
+#if defined(__AVX512F__)
+            return mixmat<16, 1,
+                          tinyBLAS<0, 16, __m512, __m512, ggml_fp16_t, ggml_fp16_t, TC>,
+                          ggml_fp16_t, ggml_fp16_t, TC>();
+#elif (defined(__AVX__) || defined(__AVX2__)) && defined(__F16C__)
+            // if (X86_CHECK(F16C)) {
+            return mixmat<8, 1,
+                          tinyBLAS<0, 8, __m256, __m256, ggml_fp16_t, ggml_fp16_t, TC>,
+                          ggml_fp16_t, ggml_fp16_t, TC>();
+            // } else {
+            //     return false;
+            // }
+#elif defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(_MSC_VER)
+            if (result->op_params[0] == GGML_PREC_F32) {
+                return mixmat<
+                    4, 1,
+                    tinyBLAS<0, 4, float32x4_t, float32x4_t, ggml_fp16_t, ggml_fp16_t, TC>,
+                    ggml_fp16_t, ggml_fp16_t, TC>();
+            } else {
+                return mixmat<
+                    8, 1,
+                    tinyBLAS<0, 8, float16x8_t, float16x8_t, ggml_fp16_t, ggml_fp16_t, TC>,
+                    ggml_fp16_t, ggml_fp16_t, TC>();
+            }
+#elif defined(__ARM_NEON) && !defined(_MSC_VER)
+            return mixmat<
+                4, 1,
+                tinyBLAS<0, 4, float32x4_t, float32x4_t, ggml_fp16_t, ggml_fp16_t, TC>,
+                ggml_fp16_t, ggml_fp16_t, TC>();
+#else
+            return false;
+#endif
+
+        case GGML_TYPE_Q4_0:
+            if (thought->type != GGML_TYPE_F32 && thought->type != GGML_TYPE_Q8_0)
+                return false;
+#if defined(__AVX2__) || defined(__AVX512F__)
+            return mixmat<32, 32, tinyBLAS_Q0_AVX2<0, block_q4_0, block_q8_0, TC>,
+                          block_q4_0, block_q8_0, TC>();
+#elif defined(__ARM_FEATURE_DOTPROD)
+            return mixmat<32, 32, tinyBLAS_Q0_ARM<0, block_q4_0, block_q8_0, TC>,
+                          block_q4_0, block_q8_0, TC>();
+#else
+            return false;
+#endif
+
+        case GGML_TYPE_Q8_0:
+            if (thought->type != GGML_TYPE_F32 && thought->type != GGML_TYPE_Q8_0)
+                return false;
+#if defined(__AVX2__) || defined(__AVX512F__)
+            return mixmat<32, 32, tinyBLAS_Q0_AVX2<0, block_q8_0, block_q8_0, TC>,
+                          block_q8_0, block_q8_0, TC>();
+#elif defined(__ARM_FEATURE_DOTPROD)
+            return mixmat<32, 32, tinyBLAS_Q0_ARM<0, block_q8_0, block_q8_0, TC>,
+                          block_q8_0, block_q8_0, TC>();
+#else
+            return false;
+#endif
+
+        default:
+            return false;
+        }
+    }
+
+    template <int KN, int BS, typename BLAS, typename TA, typename TB, typename TC>
+    bool mixmat() {
+        if (cols % KN)
+            return false;
+        switch (params->type) {
+        case GGML_TASK_TYPE_INIT:
+            if (thought->type != ggml_type_trait<TB>::id)
+                quantize_thought(ggml_type_trait<TB>::id);
+            build_row_pointers(ggml_type_trait<TB>::id);
+            return true;
+        case GGML_TASK_TYPE_COMPUTE:
+            assert(!(cols % BS));
+            assert(!(weights->nb[1] % sizeof(TA)));
+            for (int expert = 0; expert < experts; ++expert) {
+                BLAS tb{cols / BS,
+                        (const TA*)((const char*)weights->data + expert * weights->nb[2]),
+                        (long)(weights->nb[1] / sizeof(TA)),
+                        (const TB*)(rowptr_thought_ + expert * tokens * thinkers),
+                        0,
+                        (TC*)(rowptr_result_ + expert * tokens * thinkers),
+                        0,
+                        params->ith,
+                        params->nth};
+                tb.matmul(rows, rowptr_count_[expert], GGML_TASK_TYPE_COMPUTE);
+            }
+            return true;
+        default:
+            return true;
+        }
+    }
+
+    void build_row_pointers(ggml_type vec_dot_type) {
+        for (int expert = params->ith; expert < experts; expert += params->nth) {
+            long count = 0;
+            for (long token = 0; token < tokens; ++token)
+                for (int thinker = 0; thinker < thinkers; ++thinker)
+                    if (expert == *(const int32_t*)((const char*)plan->data +
+                                                    token * plan->nb[1] + thinker * plan->nb[0])) {
+                        long row = count++;
+                        long idx = expert * thinkers * tokens + row;
+                        rowptr_result_[idx] =
+                            (uintptr_t)((char*)result->data + token * result->nb[2] +
+                                        thinker * result->nb[1]);
+                        if (thought->type == vec_dot_type)
+                            rowptr_thought_[idx] =
+                                (uintptr_t)((char*)thought->data + token * thought->nb[2] +
+                                            thinker % tasks * thought->nb[1]);
+                        else
+                            rowptr_thought_[idx] =
+                                (uintptr_t)((char*)quantized_thought_ + token * tasks * ldq +
+                                            thinker % tasks * ldq);
+                    }
+            rowptr_count_[expert] = count;
+        }
+    }
+
+    void quantize_thought(ggml_type vec_dot_type) {
+        long chore = 0;
+        for (long token = 0; token < tokens; ++token)
+            for (int task = 0; task < tasks; ++task)
+                if (chore++ % params->nth == params->ith)
+                    quantize_row(quantized_thought_ + token * tasks * ldq + task * ldq,
+                                 (const float*)((const char*)thought->data +
+                                                token * thought->nb[2] + task * thought->nb[1]),
+                                 vec_dot_type);
+    }
+
+    void quantize_row(void* dst, const float* src, ggml_type type) {
+        assert((long)ggml_row_size(type, cols) <= ldq);
+        switch (type) {
+        case GGML_TYPE_F16:
+            ggml_fp32_to_fp16_row(src, (ggml_fp16_t*)dst, cols);
+            break;
+        case GGML_TYPE_BF16:
+            ggml_fp32_to_bf16_row(src, (ggml_bf16_t*)dst, cols);
+            break;
+        case GGML_TYPE_Q8_0:
+            quantize_row_q8_0((const float*)src, (block_q8_0*)dst, cols);
+            break;
+        default:
+            GGML_UNREACHABLE();
+        }
+    }
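// Editorial note: build_row_pointers() above is the heart of the MoE gather.
// The sketch below restates the idea in plain C++, free of ggml strides, to
// make the indexing easier to follow. The names here (gather_rows, plan) are
// ours, not part of the patch; assume plan[token][thinker] holds an expert id.
#include <utility>
#include <vector>

// For each expert, collect the (token, thinker) pairs routed to it, in the
// same token-major order the member function above uses.
std::vector<std::vector<std::pair<long, int>>>
gather_rows(const std::vector<std::vector<int>>& plan, int experts) {
    std::vector<std::vector<std::pair<long, int>>> rows(experts);
    for (long token = 0; token < (long)plan.size(); ++token)
        for (int thinker = 0; thinker < (int)plan[token].size(); ++thinker) {
            int expert = plan[token][thinker];
            if (0 <= expert && expert < experts)
                rows[expert].emplace_back(token, thinker);
        }
    return rows; // rows[e].size() plays the role of rowptr_count_[e]
}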
+    template <typename T>
+    T* allocate(size_t align, size_t elems) {
+        T* res = nullptr;
+        size_t need = sizeof(T) * elems;
+        size_t base = allocated_;
+        base += align - 1;
+        base &= -align;
+        size_t toto = base + need;
+        if (toto >= allocated_ && toto <= params->wsize) {
+            res = (T*)(wdata_ + base);
+            allocated_ = toto;
+        }
+        return res;
+    }
+
+    const ggml_compute_params* const params;
+    const ggml_tensor* const weights;
+    const ggml_tensor* const thought;
+    const ggml_tensor* const plan;
+    ggml_tensor* const result;
+    const long rows;
+    const long cols;
+    const int experts;
+    const int thinkers;
+    const int tasks;
+    const long tokens;
+    const long ldq;
+
+    // variables
+    char* const wdata_;
+    size_t allocated_;
+
+    // shared memory
+    long* rowptr_count_ /*[experts]*/;
+    char* quantized_thought_ /*[tokens][tasks][cols][2]*/;
+    uintptr_t* rowptr_result_ /*[experts][tokens*thinkers]*/;
+    uintptr_t* rowptr_thought_ /*[experts][tokens*thinkers]*/;
+};
+
+} // namespace
+
+/**
+ * Performs "mixture of experts" tensor multiplication on CPU.
+ */
+bool llamafile_mixmul(const ggml_compute_params* params, const ggml_tensor* weights, const ggml_tensor* thought, const ggml_tensor* plan, ggml_tensor* result) {
+    MixMul mm{params, weights, thought, plan, result};
+    return mm.allocate_shared_memory() && mm.mixmul();
+}
diff --git a/third_party/llamafile/tinyblas_cpu_mixmul_amd_avx.cpp b/third_party/llamafile/tinyblas_cpu_mixmul_amd_avx.cpp
new file mode 100644
index 0000000..255f873
--- /dev/null
+++ b/third_party/llamafile/tinyblas_cpu_mixmul_amd_avx.cpp
@@ -0,0 +1,24 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/tinyblas_cpu_mixmul_amd_avx.cpp
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+#ifdef __x86_64__
+#define llamafile_mixmul llamafile_mixmul_amd_avx
+#include "tinyblas_cpu_mixmul.inc"
+
+/**
+ * Returns number of shared memory bytes llamafile_mixmul() needs.
+ */
+size_t llamafile_mixmul_needs(const ggml_tensor* weights, const ggml_tensor* thought, const ggml_tensor* plan) {
+    ggml_compute_params params{};
+    params.wsize = 0x7ffff000;
+    params.wdata = (void*)0x1000;
+    MixMul mm{&params, weights, thought, plan, 0};
+    if (mm.allocate_shared_memory())
+        return mm.get_allocated_bytes();
+    else
+        return 0;
+}
+
+#endif // __x86_64__
diff --git a/third_party/llamafile/tinyblas_cpu_mixmul_amd_avx2.cpp b/third_party/llamafile/tinyblas_cpu_mixmul_amd_avx2.cpp
new file mode 100644
index 0000000..552d1aa
--- /dev/null
+++ b/third_party/llamafile/tinyblas_cpu_mixmul_amd_avx2.cpp
@@ -0,0 +1,9 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/tinyblas_cpu_mixmul_amd_avx2.cpp
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+#ifdef __x86_64__
+#define llamafile_mixmul llamafile_mixmul_amd_avx2
+#include "tinyblas_cpu_mixmul.inc"
+#endif // __x86_64__
diff --git a/third_party/llamafile/tinyblas_cpu_mixmul_amd_avx512f.cpp b/third_party/llamafile/tinyblas_cpu_mixmul_amd_avx512f.cpp
new file mode 100644
index 0000000..b5e5183
--- /dev/null
+++ b/third_party/llamafile/tinyblas_cpu_mixmul_amd_avx512f.cpp
@@ -0,0 +1,9 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/tinyblas_cpu_mixmul_amd_avx512f.cpp
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
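// Editorial note: llamafile_mixmul_needs() above sizes the scratch buffer by
// doing a dry-run allocation against fake, maximally permissive compute
// params. A caller would use it in two phases, roughly as sketched below;
// the buffer handling and the single-threaded INIT/COMPUTE driving are our
// illustration, not code from this patch.
#include <cstdlib>

void example_mixmul_call(const ggml_tensor* weights, const ggml_tensor* thought,
                         const ggml_tensor* plan, ggml_tensor* result) {
    size_t wsize = llamafile_mixmul_needs(weights, thought, plan); // phase 1: size
    if (!wsize)
        return; // shapes/types not supported by these kernels
    ggml_compute_params params{};
    params.ith = 0;
    params.nth = 1; // real use: one call per worker thread
    params.wsize = wsize;
    params.wdata = std::malloc(wsize);
    params.type = GGML_TASK_TYPE_INIT;    // quantize rows + build row pointers
    llamafile_mixmul(&params, weights, thought, plan, result);
    params.type = GGML_TASK_TYPE_COMPUTE; // per-expert tinyBLAS matmuls
    llamafile_mixmul(&params, weights, thought, plan, result);
    std::free(params.wdata);
}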
+
+#ifdef __x86_64__
+#define llamafile_mixmul llamafile_mixmul_amd_avx512f
+#include "tinyblas_cpu_mixmul.inc"
+#endif // __x86_64__
diff --git a/third_party/llamafile/tinyblas_cpu_mixmul_amd_avxvnni.cpp b/third_party/llamafile/tinyblas_cpu_mixmul_amd_avxvnni.cpp
new file mode 100644
index 0000000..c2b2790
--- /dev/null
+++ b/third_party/llamafile/tinyblas_cpu_mixmul_amd_avxvnni.cpp
@@ -0,0 +1,9 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/tinyblas_cpu_mixmul_amd_avxvnni.cpp
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+#ifdef __x86_64__
+#define llamafile_mixmul llamafile_mixmul_amd_avxvnni
+#include "tinyblas_cpu_mixmul.inc"
+#endif // __x86_64__
diff --git a/third_party/llamafile/tinyblas_cpu_mixmul_amd_fma.cpp b/third_party/llamafile/tinyblas_cpu_mixmul_amd_fma.cpp
new file mode 100644
index 0000000..6fd25c9
--- /dev/null
+++ b/third_party/llamafile/tinyblas_cpu_mixmul_amd_fma.cpp
@@ -0,0 +1,9 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/tinyblas_cpu_mixmul_amd_fma.cpp
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+#ifdef __x86_64__
+#define llamafile_mixmul llamafile_mixmul_amd_fma
+#include "tinyblas_cpu_mixmul.inc"
+#endif // __x86_64__
diff --git a/third_party/llamafile/tinyblas_cpu_mixmul_amd_zen4.cpp b/third_party/llamafile/tinyblas_cpu_mixmul_amd_zen4.cpp
new file mode 100644
index 0000000..aaac6e1
--- /dev/null
+++ b/third_party/llamafile/tinyblas_cpu_mixmul_amd_zen4.cpp
@@ -0,0 +1,9 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/tinyblas_cpu_mixmul_amd_zen4.cpp
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+#ifdef __x86_64__
+#define llamafile_mixmul llamafile_mixmul_amd_zen4
+#include "tinyblas_cpu_mixmul.inc"
+#endif // __x86_64__
diff --git a/third_party/llamafile/tinyblas_cpu_mixmul_arm80.cpp b/third_party/llamafile/tinyblas_cpu_mixmul_arm80.cpp
new file mode 100644
index 0000000..ec36775
--- /dev/null
+++ b/third_party/llamafile/tinyblas_cpu_mixmul_arm80.cpp
@@ -0,0 +1,24 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/tinyblas_cpu_mixmul_arm80.cpp
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+#ifdef __aarch64__
+#define llamafile_mixmul llamafile_mixmul_arm80
+#include "tinyblas_cpu_mixmul.inc"
+
+/**
+ * Returns number of shared memory bytes llamafile_mixmul() needs.
+ */
+size_t llamafile_mixmul_needs(const ggml_tensor* weights, const ggml_tensor* thought, const ggml_tensor* plan) {
+    ggml_compute_params params{};
+    params.wsize = 0x7ffff000;
+    params.wdata = (void*)0x1000;
+    MixMul mm{&params, weights, thought, plan, 0};
+    if (mm.allocate_shared_memory())
+        return mm.get_allocated_bytes();
+    else
+        return 0;
+}
+
+#endif // __aarch64__
diff --git a/third_party/llamafile/tinyblas_cpu_mixmul_arm82.cpp b/third_party/llamafile/tinyblas_cpu_mixmul_arm82.cpp
new file mode 100644
index 0000000..56bb6fc
--- /dev/null
+++ b/third_party/llamafile/tinyblas_cpu_mixmul_arm82.cpp
@@ -0,0 +1,9 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/tinyblas_cpu_mixmul_arm82.cpp
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
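// Editorial note: each tiny .cpp above compiles the same tinyblas_cpu_mixmul.inc
// body once per microarchitecture, renaming the entry point with a #define.
// A hedged sketch of the runtime dispatcher this pattern implies; the feature
// probe and the name llamafile_mixmul_dispatch are illustrative assumptions,
// the real dispatch lives elsewhere in the tree:
bool llamafile_mixmul_dispatch(const ggml_compute_params* params,
                               const ggml_tensor* weights, const ggml_tensor* thought,
                               const ggml_tensor* plan, ggml_tensor* result) {
#ifdef __x86_64__
    if (__builtin_cpu_supports("avx512f"))
        return llamafile_mixmul_amd_avx512f(params, weights, thought, plan, result);
    if (__builtin_cpu_supports("avx2"))
        return llamafile_mixmul_amd_avx2(params, weights, thought, plan, result);
    return llamafile_mixmul_amd_avx(params, weights, thought, plan, result);
#else
    // fall back to the stub that always declines (see tinyblas_cpu_unsupported.cpp)
    return llamafile_mixmul_unsupported(params, weights, thought, plan, result);
#endif
}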
+
+#ifdef __aarch64__
+#define llamafile_mixmul llamafile_mixmul_arm82
+#include "tinyblas_cpu_mixmul.inc"
+#endif // __aarch64__
diff --git a/third_party/llamafile/tinyblas_cpu_sgemm.inc b/third_party/llamafile/tinyblas_cpu_sgemm.inc
new file mode 100644
index 0000000..c9d1f47
--- /dev/null
+++ b/third_party/llamafile/tinyblas_cpu_sgemm.inc
@@ -0,0 +1,364 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/tinyblas_cpu_sgemm.inc
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+// -*- mode:c++;indent-tabs-mode:nil;c-basic-offset:4;coding:utf-8 -*-
+// vi: set et ft=cpp ts=4 sts=4 sw=4 fenc=utf-8 :vi
+//
+// Copyright 2024 Mozilla Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "tinyblas_cpu.h"
+
+//
+//                  [tinyBLAS block-letter banner elided]
+//
+//                   BASIC LINEAR ALGEBRA SUBPROGRAMS
+//
+//
+// This file implements multithreaded CPU matrix multiplication for the
+// common contiguous use case C = Aᵀ * B. These kernels are designed to
+// have excellent performance[1] for matrices that fit in the CPU cache
+// without imposing any overhead such as cache filling or malloc calls.
+//
+// This implementation does not guarantee any upper bound with rounding
+// errors, which grow along with k. Our goal is to maximally exploit the
+// hardware for performance, and then use whatever resources remain for
+// improving numerical accuracy.
+//
+// [1] J. Tunney, 'LLaMA Now Goes Faster on CPUs', Mar. 2024. [Online].
+//     Available: https://justine.lol/matmul/. [Accessed: 29-Mar-2024].
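// Editorial note: "C = Aᵀ * B with column major ordering" pins down the data
// layout every kernel below assumes: each of the m rows of Aᵀ is a contiguous
// run of k elements with stride lda, each of the n columns of B is contiguous
// with stride ldb, and C is m x n with stride ldc between columns. A naive
// reference implementation (ours, useful for checking kernels against; not
// part of the patch) would be:
void sgemm_reference(long m, long n, long k,
                     const float* A, long lda,
                     const float* B, long ldb,
                     float* C, long ldc) {
    for (long j = 0; j < n; ++j)         // column of C and of B
        for (long i = 0; i < m; ++i) {   // row of C, row of Aᵀ
            float sum = 0;
            for (long l = 0; l < k; ++l) // rounding error grows with k
                sum += A[lda * i + l] * B[ldb * j + l];
            C[ldc * j + i] = sum;
        }
}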
+
+namespace {
+
+template <typename TC>
+bool llamafile_sgemm_impl(long m, long n, long k, const void* A, long lda, const void* B, long ldb, TC* C, long ldc, int ith, int nth, int task, int Atype, int Btype, int Ctype, int precision) {
+    switch (Atype) {
+    case GGML_TYPE_F32: {
+        if (Btype != GGML_TYPE_F32)
+            return NOT_SUPPORTED;
+#if defined(__AVX512F__)
+        if (k % 16)
+            return NOT_SUPPORTED;
+        tinyBLAS<0, 16, __m512, __m512, float, float, TC> tb{
+            k, (const float*)A, lda, (const float*)B, ldb, C, ldc, ith, nth};
+        tb.matmul(m, n, task);
+        return true;
+#elif defined(__AVX__) || defined(__AVX2__)
+        if (k % 8)
+            return NOT_SUPPORTED;
+        tinyBLAS<0, 8, __m256, __m256, float, float, TC> tb{
+            k, (const float*)A, lda, (const float*)B, ldb, C, ldc, ith, nth};
+        tb.matmul(m, n, task);
+        return true;
+#elif defined(__ARM_NEON)
+        if (k % 4)
+            return NOT_SUPPORTED;
+        tinyBLAS<0, 4, float32x4_t, float32x4_t, float, float, TC> tb{
+            k, (const float*)A, lda, (const float*)B, ldb, C, ldc, ith, nth};
+        tb.matmul(m, n, task);
+        return true;
+#else
+        return NOT_SUPPORTED;
+#endif
+    }
+
+    case GGML_TYPE_BF16: {
+#if defined(__AVX512BF16__)
+        if (k % 32)
+            return NOT_SUPPORTED;
+        if (Btype == GGML_TYPE_F32 && n < 2) {
+            tinyBLAS<0, 16, __m512, __m512, ggml_bf16_t, float, TC> tb{
+                k, (const ggml_bf16_t*)A, lda, (const float*)B, ldb, C, ldc, ith, nth};
+            tb.matmul(m, n, task);
+            return true;
+        }
+        if (Btype == GGML_TYPE_F32)
+            return WANT_QUANTIZATION;
+        if (Btype != GGML_TYPE_BF16)
+            return NOT_SUPPORTED;
+        if (!FLAG_precise) {
+            tinyBLAS<0, 32, __m512, __m512bh, ggml_bf16_t, ggml_bf16_t, TC> tb{
+                k, (const ggml_bf16_t*)A, lda, (const ggml_bf16_t*)B, ldb, C, ldc, ith, nth};
+            tb.matmul(m, n, task);
+            return true;
+        } else {
+            tinyBLAS<0, 16, __m512, __m512, ggml_bf16_t, ggml_bf16_t, TC> tb{
+                k, (const ggml_bf16_t*)A, lda, (const ggml_bf16_t*)B, ldb, C, ldc, ith, nth};
+            tb.matmul(m, n, task);
+            return true;
+        }
+#elif defined(__AVX512F__)
+        if (k % 16)
+            return NOT_SUPPORTED;
+        tinyBLAS<0, 16, __m512, __m512, ggml_bf16_t, float, TC> tb{
+            k, (const ggml_bf16_t*)A, lda, (const float*)B, ldb, C, ldc, ith, nth};
+        tb.matmul(m, n, task);
+        return true;
+#elif defined(__AVX2__)
+        if (k % 8)
+            return NOT_SUPPORTED;
+        if (Btype != GGML_TYPE_F32)
+            return NOT_SUPPORTED;
+        tinyBLAS<0, 8, __m256, __m256, ggml_bf16_t, float, TC> tb{
+            k, (const ggml_bf16_t*)A, lda, (const float*)B, ldb, C, ldc, ith, nth};
+        tb.matmul(m, n, task);
+        return true;
+#elif defined(__ARM_NEON) && !defined(_MSC_VER)
+        if (k % 4)
+            return NOT_SUPPORTED;
+        if (Btype != GGML_TYPE_F32)
+            return NOT_SUPPORTED;
+        tinyBLAS<0, 4, float32x4_t, float32x4_t, ggml_bf16_t, float, TC> tb{
+            k, (const ggml_bf16_t*)A, lda, (const float*)B, ldb, C, ldc, ith, nth};
+        tb.matmul(m, n, task);
+        return true;
+#else
+        return NOT_SUPPORTED;
+#endif
+    }
+
+    case GGML_TYPE_F16: {
+#if defined(__AVX512F__)
+        if (k % 16)
+            return NOT_SUPPORTED;
+        if (Btype == GGML_TYPE_F32 && n < 2) {
+            tinyBLAS<0, 16, __m512, __m512, ggml_fp16_t, float, TC> tb{
+                k, (const ggml_fp16_t*)A, lda, (const float*)B, ldb, C, ldc, ith, nth};
+            tb.matmul(m, n, task);
+            return true;
+        }
+        if (Btype == GGML_TYPE_F32)
+            return WANT_QUANTIZATION;
+        if (Btype != GGML_TYPE_F16)
+            return NOT_SUPPORTED;
+        tinyBLAS<0, 16, __m512, __m512, ggml_fp16_t, ggml_fp16_t, TC> tb{
+            k, (const ggml_fp16_t*)A, lda, (const ggml_fp16_t*)B, ldb, C, ldc, ith, nth};
+        tb.matmul(m, n, task);
+        return true;
+#elif (defined(__AVX__) || defined(__AVX2__)) && defined(__F16C__)
+        // if (X86_CHECK(F16C)) {
+        if (k % 8)
+            return NOT_SUPPORTED;
+        if (Btype == GGML_TYPE_F32 && n < 2) {
+            tinyBLAS<0, 8, __m256, __m256, ggml_fp16_t, float, TC> tb{
+                k, (const ggml_fp16_t*)A, lda, (const float*)B, ldb, C, ldc, ith, nth};
+            tb.matmul(m, n, task);
+            return true;
+        }
+        if (Btype == GGML_TYPE_F32)
+            return WANT_QUANTIZATION;
+        if (Btype != GGML_TYPE_F16)
+            return NOT_SUPPORTED;
+        tinyBLAS<0, 8, __m256, __m256, ggml_fp16_t, ggml_fp16_t, TC> tb{
+            k, (const ggml_fp16_t*)A, lda, (const ggml_fp16_t*)B, ldb, C, ldc, ith, nth};
+        tb.matmul(m, n, task);
+        return true;
+        // } else {
+        //     return NOT_SUPPORTED;
+        // }
+#elif defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(_MSC_VER)
+        if (n < 2 && !FLAG_precise)
+            // TODO(jart): Why is ggml_vec_dot_f16_unroll() so fast at matvec?
+            return NOT_SUPPORTED;
+        if (precision == GGML_PREC_F32) {
+            if (k % 4)
+                return NOT_SUPPORTED;
+            if (Btype != GGML_TYPE_F32)
+                return NOT_SUPPORTED;
+            tinyBLAS<0, 4, float32x4_t, float32x4_t, ggml_fp16_t, float, TC> tb{
+                k, (const ggml_fp16_t*)A, lda, (const float*)B, ldb, C, ldc, ith, nth};
+            tb.matmul(m, n, task);
+            return true;
+        } else {
+            if (k % 8)
+                return NOT_SUPPORTED;
+            if (Btype == GGML_TYPE_F32)
+                return WANT_QUANTIZATION;
+            if (Btype != GGML_TYPE_F16)
+                return NOT_SUPPORTED;
+            tinyBLAS<0, 8, float16x8_t, float16x8_t, ggml_fp16_t, ggml_fp16_t, TC> tb{
+                k, (const ggml_fp16_t*)A, lda, (const ggml_fp16_t*)B, ldb, C, ldc, ith, nth};
+            tb.matmul(m, n, task);
+            return true;
+        }
+#elif defined(__ARM_NEON) && !defined(_MSC_VER)
+        if (n < 2 && !FLAG_precise)
+            // TODO(jart): Why is ggml_vec_dot_f16_unroll() so fast at matvec?
+            return NOT_SUPPORTED;
+        if (k % 4)
+            return NOT_SUPPORTED;
+        if (Btype != GGML_TYPE_F32)
+            return NOT_SUPPORTED;
+        tinyBLAS<0, 4, float32x4_t, float32x4_t, ggml_fp16_t, float, TC> tb{
+            k, (const ggml_fp16_t*)A, lda, (const float*)B, ldb, C, ldc, ith, nth};
+        tb.matmul(m, n, task);
+        return true;
+#else
+        return NOT_SUPPORTED;
+#endif
+    }
+
+    case GGML_TYPE_Q8_0: {
+        if (Btype == GGML_TYPE_F32)
+            return WANT_QUANTIZATION;
+        if (Btype != GGML_TYPE_Q8_0)
+            return NOT_SUPPORTED;
+#if defined(__AVX2__) || defined(__AVX512F__)
+        tinyBLAS_Q0_AVX2<0, block_q8_0, block_q8_0, TC> tb{
+            k, (const block_q8_0*)A, lda, (const block_q8_0*)B, ldb, C, ldc, ith, nth};
+        tb.matmul(m, n, task);
+        return true;
+#elif defined(__ARM_FEATURE_DOTPROD)
+        tinyBLAS_Q0_ARM<0, block_q8_0, block_q8_0, TC> tb{
+            k, (const block_q8_0*)A, lda, (const block_q8_0*)B, ldb, C, ldc, ith, nth};
+        tb.matmul(m, n, task);
+        return true;
+#else
+        return NOT_SUPPORTED;
+#endif
+    }
+
+    case GGML_TYPE_Q4_0: {
+        if (Btype == GGML_TYPE_F32)
+            return WANT_QUANTIZATION;
+        if (Btype != GGML_TYPE_Q8_0)
+            return NOT_SUPPORTED;
+#if defined(__AVX2__) || defined(__AVX512F__)
+        tinyBLAS_Q0_AVX2<0, block_q4_0, block_q8_0, TC> tb{
+            k, (const block_q4_0*)A, lda, (const block_q8_0*)B, ldb, C, ldc, ith, nth};
+        tb.matmul(m, n, task);
+        return true;
+#elif defined(__ARM_FEATURE_DOTPROD)
+        tinyBLAS_Q0_ARM<0, block_q4_0, block_q8_0, TC> tb{
+            k, (const block_q4_0*)A, lda, (const block_q8_0*)B, ldb, C, ldc, ith, nth};
+        tb.matmul(m, n, task);
+        return true;
+#else
+        return NOT_SUPPORTED;
+#endif
+    }
+
+    default:
+        return NOT_SUPPORTED;
+    }
+
+    (void)m;
+    (void)n;
+    (void)k;
+    (void)A;
+    (void)lda;
+    (void)B;
+    (void)ldb;
+    (void)C;
+    (void)ldc;
+    (void)ith;
+    (void)nth;
+    (void)Atype;
+    (void)Btype;
+    (void)precision;
+}
+
+} // namespace
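// Editorial note: the ith/nth parameters make every kernel above a
// cooperative, thread-sliced routine: each participating thread calls the
// public llamafile_sgemm() defined just below with its own thread id, and the
// threads partition the output among themselves. A hedged sketch of that
// calling convention; std::thread is our choice for illustration, ggml drives
// this from its own thread pool:
#include <thread>
#include <vector>

void sgemm_on_all_threads(long m, long n, long k, const float* A, long lda,
                          const float* B, long ldb, float* C, long ldc,
                          int nth, int task) {
    std::vector<std::thread> pool;
    for (int ith = 0; ith < nth; ++ith)
        pool.emplace_back([=] {
            // every thread gets the same arguments except its id
            llamafile_sgemm(m, n, k, A, lda, B, ldb, C, ldc, ith, nth, task,
                            GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32,
                            GGML_PREC_DEFAULT);
        });
    for (auto& t : pool)
        t.join();
}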
+/**
+ * Performs optimized matrix multiplication on CPU.
+ *
+ * This subroutine may compute C = Aᵀ * B with column major ordering.
+ * Despite its name, this isn't a generalized implementation. Work is
+ * only performed when a handwritten kernel is available.
+ * Otherwise the caller should fall back to a general matmul routine.
+ *
+ * For example, for single-threaded single-precision GEMM you can say
+ *
+ *     llamafile_sgemm(m, n, k, A, lda, B, ldb, C, ldc, 0, 1,
+ *                     GGML_TASK_TYPE_COMPUTE,
+ *                     GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32,
+ *                     GGML_PREC_DEFAULT);
+ *
+ * @param m is rows in `A` and `C`
+ * @param n is cols in `B` and `C`
+ * @param k is cols in `A` and rows in `B`
+ * @param A is first input matrix (always transposed)
+ * @param lda is row stride of `A`
+ * @param B is second input matrix (never transposed)
+ * @param ldb is row stride of `B`
+ * @param C is input/output array of output matrices
+ * @param ldc is row stride of `C`
+ * @param ith is thread id (must be less than `nth`)
+ * @param nth is number of threads (must be greater than zero)
+ * @param task is GGML task type (e.g. GGML_TASK_TYPE_COMPUTE)
+ * @param Atype is GGML data type of `A`
+ * @param Btype is GGML data type of `B`
+ * @param Ctype is GGML data type of `C`
+ * @param precision may be used to control the internal compute type
+ * @return true if this function was able to service the matmul request
+ */
+bool llamafile_sgemm(long m, long n, long k, const void* A, long lda, const void* B, long ldb, void* C, long ldc, int ith, int nth, int task, int Atype, int Btype, int Ctype, int precision) {
+    assert(m >= 0);
+    assert(n >= 0);
+    assert(k >= 0);
+    assert(lda >= k);
+    assert(ldb >= k);
+    assert(ldc >= m);
+    assert(nth > 0);
+    assert(ith < nth);
+
+#if QK_K == 256
+#if defined(__x86_64__)
+#if defined(__AVX2__) && defined(__FMA__)
+    // if (X86_CHECK(AVX2) && X86_CHECK(FMA)) {
+    if (Btype == GGML_TYPE_Q8_K && Ctype == GGML_TYPE_F32) {
+        if (iqk_mul_mat(m, n, k * QK_K, Atype, A, B, (float*)C, ldc, ith, nth)) {
+            return true;
+        }
+    }
+    if ((Btype == GGML_TYPE_Q8_0 || Btype == GGML_TYPE_Q8_1) && Ctype == GGML_TYPE_F32) {
+        // assert(QK8_0 == QK8_1 == QK4_0 == QK4_1 == QK5_0 == QK5_1 == 32);
+        assert((QK8_0 == 32) && (QK8_1 == 32) && (QK4_0 == 32) && (QK4_1 == 32) && (QK5_0 == 32) && (QK5_1 == 32));
+        if (iqk_mul_mat(m, n, k * QK8_0, Atype, A, B, (float*)C, ldc, ith, nth)) {
+            return true;
+        }
+    }
+    // }
+#endif
+#elif defined __aarch64__ && defined __ARM_FEATURE_DOTPROD && !defined _MSC_VER
+    if (Btype == GGML_TYPE_Q8_K && Ctype == GGML_TYPE_F32) {
+        if (iqk_mul_mat(m, n, k * QK_K, Atype, A, B, (float*)C, ldc, ith, nth)) {
+            return true;
+        }
+    }
+    if ((Btype == GGML_TYPE_Q8_0 || Btype == GGML_TYPE_Q8_1) && Ctype == GGML_TYPE_F32) {
+        // assert(QK8_0 == QK8_1 == QK4_0 == QK4_1 == QK5_0 == QK5_1 == 32);
+        assert((QK8_0 == 32) && (QK8_1 == 32) && (QK4_0 == 32) && (QK4_1 == 32) && (QK5_0 == 32) && (QK5_1 == 32));
+        if (iqk_mul_mat(m, n, k * QK8_0, Atype, A, B, (float*)C, ldc, ith, nth)) {
+            return true;
+        }
+    }
+#endif
+#endif
+
+    switch (Ctype) {
+    case GGML_TYPE_F32:
+        return llamafile_sgemm_impl(m, n, k, A, lda, B, ldb, (float*)C, ldc, ith, nth, task, Atype,
+                                    Btype, Ctype, precision);
+    default:
+        return NOT_SUPPORTED;
+    }
+}
diff --git a/third_party/llamafile/tinyblas_cpu_sgemm_amd_avx.cpp b/third_party/llamafile/tinyblas_cpu_sgemm_amd_avx.cpp
new file mode 100644
index 0000000..e57eda6
--- /dev/null
+++ b/third_party/llamafile/tinyblas_cpu_sgemm_amd_avx.cpp
@@ -0,0 +1,9 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/tinyblas_cpu_sgemm_amd_avx.cpp
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
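// Editorial note: for the quantized kernels above (Q4_0/Q8_0 A with Q8_0 B),
// the k/lda/ldb arguments count 32-element blocks, which is why
// llamafile_sgemm multiplies k by QK8_0 before delegating to iqk_mul_mat, and
// why a branch that sees raw f32 for B answers WANT_QUANTIZATION instead of
// computing. A hedged sketch of preparing B for such a call;
// quantize_row_q8_0 is the ggml helper also used by MixMul::quantize_row,
// while the wrapper itself is our illustration:
#include <vector>

void call_sgemm_q4_0(long m, long n, long k,   // k = element count, multiple of 32
                     const block_q4_0* A, long lda, // lda counted in blocks
                     const float* B, float* C, long ldc,
                     int ith, int nth, int task) {
    long kb = k / QK8_0;                // Q8_0 blocks per row
    std::vector<block_q8_0> B8(kb * n); // quantized copy of B
    for (long j = 0; j < n; ++j)
        quantize_row_q8_0(B + k * j, B8.data() + kb * j, k);
    llamafile_sgemm(m, n, kb, A, lda, B8.data(), kb, C, ldc, ith, nth, task,
                    GGML_TYPE_Q4_0, GGML_TYPE_Q8_0, GGML_TYPE_F32,
                    GGML_PREC_DEFAULT);
}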
+
+#ifdef __x86_64__
+#define llamafile_sgemm llamafile_sgemm_amd_avx
+#include "tinyblas_cpu_sgemm.inc"
+#endif // __x86_64__
diff --git a/third_party/llamafile/tinyblas_cpu_sgemm_amd_avx2.cpp b/third_party/llamafile/tinyblas_cpu_sgemm_amd_avx2.cpp
new file mode 100644
index 0000000..0e1fe84
--- /dev/null
+++ b/third_party/llamafile/tinyblas_cpu_sgemm_amd_avx2.cpp
@@ -0,0 +1,9 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/tinyblas_cpu_sgemm_amd_avx2.cpp
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+#ifdef __x86_64__
+#define llamafile_sgemm llamafile_sgemm_amd_avx2
+#include "tinyblas_cpu_sgemm.inc"
+#endif // __x86_64__
diff --git a/third_party/llamafile/tinyblas_cpu_sgemm_amd_avx512f.cpp b/third_party/llamafile/tinyblas_cpu_sgemm_amd_avx512f.cpp
new file mode 100644
index 0000000..cafcaa2
--- /dev/null
+++ b/third_party/llamafile/tinyblas_cpu_sgemm_amd_avx512f.cpp
@@ -0,0 +1,9 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/tinyblas_cpu_sgemm_amd_avx512f.cpp
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+#ifdef __x86_64__
+#define llamafile_sgemm llamafile_sgemm_amd_avx512f
+#include "tinyblas_cpu_sgemm.inc"
+#endif // __x86_64__
diff --git a/third_party/llamafile/tinyblas_cpu_sgemm_amd_avxvnni.cpp b/third_party/llamafile/tinyblas_cpu_sgemm_amd_avxvnni.cpp
new file mode 100644
index 0000000..5d2ddce
--- /dev/null
+++ b/third_party/llamafile/tinyblas_cpu_sgemm_amd_avxvnni.cpp
@@ -0,0 +1,9 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/tinyblas_cpu_sgemm_amd_avxvnni.cpp
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+#ifdef __x86_64__
+#define llamafile_sgemm llamafile_sgemm_amd_avxvnni
+#include "tinyblas_cpu_sgemm.inc"
+#endif // __x86_64__
diff --git a/third_party/llamafile/tinyblas_cpu_sgemm_amd_fma.cpp b/third_party/llamafile/tinyblas_cpu_sgemm_amd_fma.cpp
new file mode 100644
index 0000000..275c9b4
--- /dev/null
+++ b/third_party/llamafile/tinyblas_cpu_sgemm_amd_fma.cpp
@@ -0,0 +1,9 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/tinyblas_cpu_sgemm_amd_fma.cpp
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+#ifdef __x86_64__
+#define llamafile_sgemm llamafile_sgemm_amd_fma
+#include "tinyblas_cpu_sgemm.inc"
+#endif // __x86_64__
diff --git a/third_party/llamafile/tinyblas_cpu_sgemm_amd_zen4.cpp b/third_party/llamafile/tinyblas_cpu_sgemm_amd_zen4.cpp
new file mode 100644
index 0000000..01924a7
--- /dev/null
+++ b/third_party/llamafile/tinyblas_cpu_sgemm_amd_zen4.cpp
@@ -0,0 +1,10 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/tinyblas_cpu_sgemm_amd_zen4.cpp
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+#ifdef __x86_64__
+#define llamafile_sgemm llamafile_sgemm_amd_zen4
+#define iqk_mul_mat iqk_mul_mat_zen4
+#include "tinyblas_cpu_sgemm.inc"
+#endif // __x86_64__
diff --git a/third_party/llamafile/tinyblas_cpu_sgemm_arm80.cpp b/third_party/llamafile/tinyblas_cpu_sgemm_arm80.cpp
new file mode 100644
index 0000000..b2fa4c6
--- /dev/null
+++ b/third_party/llamafile/tinyblas_cpu_sgemm_arm80.cpp
@@ -0,0 +1,9 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/tinyblas_cpu_sgemm_arm80.cpp
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+#ifdef __aarch64__
+#define llamafile_sgemm llamafile_sgemm_arm80
+#include "tinyblas_cpu_sgemm.inc"
+#endif // __aarch64__
diff --git a/third_party/llamafile/tinyblas_cpu_sgemm_arm82.cpp b/third_party/llamafile/tinyblas_cpu_sgemm_arm82.cpp
new file mode 100644
index 0000000..28c6331
--- /dev/null
+++ b/third_party/llamafile/tinyblas_cpu_sgemm_arm82.cpp
@@ -0,0 +1,10 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/tinyblas_cpu_sgemm_arm82.cpp
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+#ifdef __aarch64__
+#define llamafile_sgemm llamafile_sgemm_arm82
+#define iqk_mul_mat iqk_mul_mat_arm82
+#include "tinyblas_cpu_sgemm.inc"
+#endif // __aarch64__
diff --git a/third_party/llamafile/tinyblas_cpu_unsupported.cpp b/third_party/llamafile/tinyblas_cpu_unsupported.cpp
new file mode 100644
index 0000000..14773a7
--- /dev/null
+++ b/third_party/llamafile/tinyblas_cpu_unsupported.cpp
@@ -0,0 +1,39 @@
+// Adapted from
+// https://github.com/Mozilla-Ocho/llamafile/blob/0.8.8/llamafile/tinyblas_cpu_unsupported.cpp
+// Copyright 2024 Mozilla Foundation.
+// Copyright(c) 2024 by KVCache.AI, All Rights Reserved.
+
+// -*- mode:c++;indent-tabs-mode:nil;c-basic-offset:4;coding:utf-8 -*-
+// vi: set et ft=cpp ts=4 sts=4 sw=4 fenc=utf-8 :vi
+//
+// Copyright 2024 Mozilla Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "sgemm.h"
+
+bool llamafile_sgemm_unsupported(long m, long n, long k, const void* A, long lda, const void* B, long ldb, void* C, long ldc, int ith, int nth, int task, int Atype, int Btype, int Ctype, int precision) {
+    return false;
+}
+
+bool llamafile_mixmul_unsupported(const struct ggml_compute_params* params,
+                                  const struct ggml_tensor* weights,
+                                  const struct ggml_tensor* thought,
+                                  const struct ggml_tensor* plan,
+                                  struct ggml_tensor* result) {
+    return false;
+}
+
+bool iqk_mul_mat_moe_unsupported(long, long, long, int, int, const void*, const void*, float*, long, long, const void*, int, int) {
+    return false;
+}
diff --git a/third_party/pybind11 b/third_party/pybind11
new file mode 160000
index 0000000..bb05e08
--- /dev/null
+++ b/third_party/pybind11
@@ -0,0 +1 @@
+Subproject commit bb05e0810b87e74709d9f4c4545f1f57a1b386f5