diff --git a/examples/deepseek1.3b_code_generation/README.md b/examples/deepseek1.3b_code_generation/README.md new file mode 100644 index 000000000..32630ac8a --- /dev/null +++ b/examples/deepseek1.3b_code_generation/README.md @@ -0,0 +1,94 @@ +# DeepSeek Coder 代码生成教程与示例 + +本目录包含使用 DeepSeek Coder 模型进行代码生成的教程和示例。DeepSeek Coder 是一个强大的代码生成模型,专为编程领域优化,能够根据自然语言描述生成高质量的代码。 + +## 内容 + +- `deepseek_coder_tutorial.ipynb`: Jupyter Notebook 教程,展示如何使用 DeepSeek Coder 模型进行各种代码生成任务 +- `deepseek_coder_code_generation.py`: 命令行工具,用于生成代码 +- `deepseek_coder_finetuning.py`: 在自定义数据集上微调 DeepSeek Coder 模型的脚本 + +## 基本用法 + +### 安装依赖 + +确保你已经安装了最新版本的 MindNLP: + +```bash +pip install mindnlp +``` + +### 使用命令行工具生成代码 + +```bash +python deepseek_coder_code_generation.py --prompt "实现一个快速排序算法" --max_length 500 +``` + +参数说明: +- `--prompt`: 用于生成代码的自然语言描述 +- `--max_length`: 生成的最大长度 +- `--temperature`: 生成温度 (默认为0.7) +- `--top_p`: 核采样概率 (默认为0.95) +- `--top_k`: Top-K抽样 (默认为50) +- `--model_name_or_path`: 要使用的模型名称或路径 (默认为 "deepseek-ai/deepseek-coder-1.3b-base") + +### 微调 DeepSeek Coder 模型 + +如果你有特定领域的代码数据集,可以使用我们提供的微调脚本来自定义 DeepSeek Coder 模型: + +```bash +python deepseek_coder_finetuning.py \ + --train_file path/to/train.txt \ + --validation_file path/to/validation.txt \ + --output_dir ./deepseek-coder-finetuned \ + --num_train_epochs 3 \ + --per_device_train_batch_size 4 +``` + +对于大型模型,建议使用 LoRA 进行参数高效微调: + +```bash +python deepseek_coder_finetuning.py \ + --train_file path/to/train.txt \ + --output_dir ./deepseek-coder-finetuned \ + --use_lora \ + --lora_rank 8 \ + --lora_alpha 16 +``` + +## 进阶教程 + +查看 `deepseek_coder_tutorial.ipynb` 获取更详细的教程,包括: + +1. 基础代码生成 +2. 高级代码生成示例 +3. 调整生成参数 +4. 提取生成的代码 +5. 实际应用案例 + +## 数据格式 + +对于微调,训练数据应该是文本文件,每个代码样本以 `# ---NEW SAMPLE---` 分隔。例如: + +``` +def fibonacci(n): + if n <= 1: + return n + return fibonacci(n-1) + fibonacci(n-2) +# ---NEW SAMPLE--- +def quick_sort(arr): + if len(arr) <= 1: + return arr + pivot = arr[len(arr) // 2] + left = [x for x in arr if x < pivot] + middle = [x for x in arr if x == pivot] + right = [x for x in arr if x > pivot] + return quick_sort(left) + middle + quick_sort(right) +``` + +## 注意事项 + +- DeepSeek Coder 模型适用于生成多种编程语言的代码,但效果最好的是 Python、JavaScript、Java、C++ 等常用语言 +- 提供更详细和具体的提示通常会得到更好的代码生成结果 +- 对于复杂任务,可以尝试增大 `max_length` 参数值 +- 降低 `temperature` 参数可以获得更确定性的结果,增大可以获得更多样化的输出 \ No newline at end of file diff --git a/examples/deepseek1.3b_code_generation/code_assistant_bot.py b/examples/deepseek1.3b_code_generation/code_assistant_bot.py new file mode 100644 index 000000000..0da11926d --- /dev/null +++ b/examples/deepseek1.3b_code_generation/code_assistant_bot.py @@ -0,0 +1,262 @@ +#!/usr/bin/env python +# coding=utf-8 +""" +基于 DeepSeek Coder 模型的代码助手机器人 +""" + +import os +import argparse +import re +import time +from rich.console import Console +from rich.markdown import Markdown +from rich.panel import Panel +from rich.syntax import Syntax +from prompt_toolkit import PromptSession +from prompt_toolkit.history import FileHistory +from prompt_toolkit.auto_suggest import AutoSuggestFromHistory +from mindnlp.transformers import AutoModelForCausalLM, AutoTokenizer + +console = Console() + +class CodeAssistant: + """代码助手类,使用 DeepSeek Coder 模型提供代码生成和解释服务""" + + def __init__(self, model_name="deepseek-ai/deepseek-coder-1.3b-base"): + """初始化代码助手""" + self.model_name = model_name + + # 加载模型和分词器 + console.print(f"正在加载 [bold]{model_name}[/bold] 模型...", style="yellow") + self.tokenizer = AutoTokenizer.from_pretrained(model_name) + self.model = 
AutoModelForCausalLM.from_pretrained(model_name) + console.print("模型加载完成!", style="green") + + # 对话历史 + self.conversation_history = [] + + # 命令列表 + self.commands = { + "/help": self.show_help, + "/clear": self.clear_history, + "/save": self.save_conversation, + "/exit": lambda: "exit", + "/examples": self.show_examples + } + + def start(self): + """启动交互式代码助手""" + console.print(Panel.fit( + "[bold]DeepSeek Coder 代码助手[/bold]\n\n" + "一个基于 DeepSeek Coder 模型的代码生成和解释工具\n" + "输入 [bold blue]/help[/bold blue] 查看帮助信息\n" + "输入 [bold blue]/exit[/bold blue] 退出程序", + title="欢迎使用", + border_style="green" + )) + + # 创建历史记录文件 + history_file = os.path.expanduser("~/.code_assistant_history") + session = PromptSession(history=FileHistory(history_file), + auto_suggest=AutoSuggestFromHistory()) + + while True: + try: + user_input = session.prompt("\n[用户] > ") + + # 处理命令 + if user_input.strip().startswith("/"): + command = user_input.strip().split()[0] + if command in self.commands: + result = self.commands[command]() + if result == "exit": + break + continue + + if not user_input.strip(): + continue + + # 将用户输入添加到历史记录 + self.conversation_history.append(f"[用户] {user_input}") + + # 获取回复 + start_time = time.time() + console.print("[AI 思考中...]", style="yellow") + + response = self.generate_response(user_input) + + # 提取代码块 + code_blocks = self.extract_code_blocks(response) + + # 格式化输出 + console.print("\n[AI 助手]", style="bold green") + + # 如果有代码块,特殊处理 + if code_blocks: + parts = re.split(r'```(?:\w+)?\n|```', response) + i = 0 + for part in parts: + if part.strip(): + if i % 2 == 0: # 文本部分 + console.print(Markdown(part.strip())) + else: # 代码部分 + lang = self.detect_language(code_blocks[(i-1)//2]) + console.print(Syntax(code_blocks[(i-1)//2], lang, theme="monokai", + line_numbers=True, word_wrap=True)) + i += 1 + else: + # 没有代码块,直接显示为Markdown + console.print(Markdown(response)) + + elapsed_time = time.time() - start_time + console.print(f"[生成用时: {elapsed_time:.2f}秒]", style="dim") + + # 将回复添加到历史记录 + self.conversation_history.append(f"[AI] {response}") + + except KeyboardInterrupt: + console.print("\n中断操作...", style="bold red") + break + except Exception as e: + console.print(f"\n发生错误: {str(e)}", style="bold red") + + def generate_response(self, prompt, max_length=1000, temperature=0.7): + """生成回复""" + # 处理提示 + if "代码" in prompt or "函数" in prompt or "实现" in prompt or "编写" in prompt: + # 检测是否已经包含了代码格式声明 + if not "```" in prompt: + prompt = f"```python\n# {prompt}\n" + + inputs = self.tokenizer(prompt, return_tensors="ms") + + # 生成回复 + generated_ids = self.model.generate( + inputs.input_ids, + max_length=max_length, + do_sample=True, + temperature=temperature, + top_p=0.95, + top_k=50, + ) + + response = self.tokenizer.decode(generated_ids[0], skip_special_tokens=True) + + # 清理响应,如果有的话 + if prompt in response: + response = response.replace(prompt, "", 1).strip() + + return response + + def extract_code_blocks(self, text): + """从文本中提取代码块""" + pattern = r'```(?:\w+)?\n(.*?)```' + matches = re.findall(pattern, text, re.DOTALL) + return matches + + def detect_language(self, code): + """简单检测代码语言""" + if "def " in code and ":" in code: + return "python" + elif "{" in code and "}" in code and ";" in code: + if "public class" in code or "private" in code: + return "java" + elif "function" in code or "var" in code or "let" in code or "const" in code: + return "javascript" + else: + return "cpp" + elif "<" in code and ">" in code and ("" in code): + return "html" + else: + return "text" + + def show_help(self): + """显示帮助信息""" + 
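+        # 帮助文本使用 Markdown 语法编写,最终通过 rich 的 Markdown 渲染输出到终端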
help_text = """ + # 可用命令: + + - `/help` - 显示此帮助信息 + - `/clear` - 清除当前对话历史 + - `/save` - 保存当前对话到文件 + - `/examples` - 显示示例提示 + - `/exit` - 退出程序 + + # 使用技巧: + + 1. 提供详细的需求描述以获得更好的代码生成效果 + 2. 如果生成的代码不满意,可以要求修改或优化 + 3. 可以请求解释已有代码或调试问题 + 4. 对复杂功能,建议分步骤请求实现 + """ + console.print(Markdown(help_text)) + + def clear_history(self): + """清除对话历史""" + self.conversation_history = [] + console.print("已清除对话历史", style="green") + + def save_conversation(self): + """保存对话到文件""" + if not self.conversation_history: + console.print("没有对话内容可保存", style="yellow") + return + + filename = f"code_assistant_conversation_{int(time.time())}.md" + with open(filename, "w", encoding="utf-8") as f: + f.write("# DeepSeek Coder 代码助手对话记录\n\n") + for entry in self.conversation_history: + if entry.startswith("[用户]"): + f.write(f"## {entry}\n\n") + else: + f.write(f"{entry[5:]}\n\n") + + console.print(f"对话已保存到 {filename}", style="green") + + def show_examples(self): + """显示示例提示""" + examples = """ + # 示例提示: + + 1. "实现一个Python函数,计算两个日期之间的工作日数量" + + 2. "编写一个简单的Flask API,具有用户注册和登录功能" + + 3. "创建一个二分查找算法的JavaScript实现" + + 4. "使用pandas分析CSV数据并生成统计报告" + + 5. "实现一个简单的React组件,显示待办事项列表" + + 6. "解释以下代码的功能: + ```python + def mystery(arr): + return [x for x in arr if x == x[::-1]] + ```" + + 7. "优化下面的排序算法: + ```python + def sort(arr): + for i in range(len(arr)): + for j in range(len(arr)): + if arr[i] < arr[j]: + arr[i], arr[j] = arr[j], arr[i] + return arr + ```" + """ + console.print(Markdown(examples)) + + +def main(): + """主函数""" + parser = argparse.ArgumentParser(description="DeepSeek Coder 代码助手") + parser.add_argument("--model", type=str, default="deepseek-ai/deepseek-coder-1.3b-base", + help="使用的模型名称或路径") + args = parser.parse_args() + + # 创建并启动代码助手 + assistant = CodeAssistant(model_name=args.model) + assistant.start() + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/examples/deepseek1.3b_code_generation/deepseek_coder_code_generation.py b/examples/deepseek1.3b_code_generation/deepseek_coder_code_generation.py new file mode 100644 index 000000000..97d7ad74a --- /dev/null +++ b/examples/deepseek1.3b_code_generation/deepseek_coder_code_generation.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python +# coding=utf-8 +""" +使用 DeepSeek Coder 模型生成代码示例 +""" + +import argparse +from mindnlp.transformers import AutoModelForCausalLM, AutoTokenizer + +def parse_args(): + parser = argparse.ArgumentParser(description="使用 DeepSeek Coder 生成代码") + parser.add_argument( + "--model_name_or_path", + type=str, + default="deepseek-ai/deepseek-coder-1.3b-base", + help="预训练模型名称或路径,默认为 deepseek-coder-1.3b-base", + ) + parser.add_argument( + "--prompt", + type=str, + default="编写一个Python函数,实现快速排序算法", + help="用于生成代码的提示文本", + ) + parser.add_argument( + "--max_length", + type=int, + default=500, + help="生成的最大长度", + ) + parser.add_argument( + "--temperature", + type=float, + default=0.7, + help="生成的温度,较高的值会使输出更加随机,较低的值使其更加集中和确定", + ) + parser.add_argument( + "--top_p", + type=float, + default=0.95, + help="nucleus采样的概率阈值", + ) + parser.add_argument( + "--top_k", + type=int, + default=50, + help="取前k个候选的限制", + ) + args = parser.parse_args() + return args + +def main(): + args = parse_args() + + # 加载模型和分词器 + print(f"加载模型和分词器: {args.model_name_or_path}") + tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) + model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path) + + # 准备提示 + prompt = args.prompt + + # 添加前缀以获得更好的代码生成效果 + if not prompt.startswith("```"): + if "python" in prompt.lower(): + prompt = f"```python\n# 
{prompt}\n" + else: + prompt = f"```python\n# {prompt}\n" + + # 分词 + inputs = tokenizer(prompt, return_tensors="ms") + + # 生成代码 + print(f"使用提示:'{args.prompt}' 生成代码...") + generated_ids = model.generate( + inputs.input_ids, + max_length=args.max_length, + temperature=args.temperature, + top_p=args.top_p, + top_k=args.top_k, + do_sample=True, + ) + + # 解码生成的代码 + generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) + + # 打印生成的代码 + print("\n生成的代码:") + print("-" * 50) + print(generated_text) + print("-" * 50) + + # 提取代码部分(如果有```标记的话) + if "```" in generated_text: + code_start = generated_text.find("```") + 3 + language_end = generated_text.find("\n", code_start) + code_end = generated_text.find("```", language_end) + if code_end == -1: # 如果没有结束的``` + code = generated_text[language_end+1:] + else: + code = generated_text[language_end+1:code_end].strip() + + print("\n提取的纯代码:") + print("-" * 50) + print(code) + print("-" * 50) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/examples/deepseek1.3b_code_generation/deepseek_coder_finetuning.py b/examples/deepseek1.3b_code_generation/deepseek_coder_finetuning.py new file mode 100644 index 000000000..90e5b7b47 --- /dev/null +++ b/examples/deepseek1.3b_code_generation/deepseek_coder_finetuning.py @@ -0,0 +1,240 @@ +#!/usr/bin/env python +# coding=utf-8 +""" +DeepSeek Coder 模型在特定代码数据集上的微调示例 +""" + +import logging +import os +import argparse +import math +from dataclasses import dataclass, field +from typing import Dict, Optional, List, Union + +import mindspore +from mindspore import nn +from mindspore.dataset import GeneratorDataset + +from mindnlp.transformers import ( + AutoConfig, + AutoModelForCausalLM, + AutoTokenizer, + TrainingArguments, + Trainer, + set_seed, +) +from mindnlp.transformers.data.data_collator import DataCollatorForLanguageModeling +from mindnlp.transformers.optimization import get_scheduler, AdamWeightDecay + +logger = logging.getLogger(__name__) + +@dataclass +class ModelArguments: + """ + 模型参数 + """ + model_name_or_path: str = field( + default="deepseek-ai/deepseek-coder-1.3b-base", + metadata={"help": "预训练模型的路径或标识符"} + ) + use_lora: bool = field( + default=False, + metadata={"help": "是否使用LoRA进行参数高效微调"} + ) + lora_rank: int = field( + default=8, + metadata={"help": "LoRA的秩"} + ) + lora_alpha: int = field( + default=16, + metadata={"help": "LoRA的alpha参数"} + ) + lora_dropout: float = field( + default=0.05, + metadata={"help": "LoRA的dropout率"} + ) + +@dataclass +class DataTrainingArguments: + """ + 数据训练参数 + """ + train_file: Optional[str] = field( + default=None, metadata={"help": "训练数据文件的路径"} + ) + validation_file: Optional[str] = field( + default=None, metadata={"help": "验证数据文件的路径"} + ) + max_seq_length: Optional[int] = field( + default=512, + metadata={ + "help": "单个样本的最大总序列长度。序列将被截断为该长度。" + }, + ) + preprocessing_num_workers: Optional[int] = field( + default=None, + metadata={"help": "用于数据预处理的进程数"}, + ) + overwrite_cache: bool = field( + default=False, metadata={"help": "是否覆盖缓存的预处理数据集"} + ) + block_size: Optional[int] = field( + default=None, + metadata={ + "help": "用于划分训练样本的可选输入序列长度" + }, + ) + +class CodeDataset: + """代码数据集类""" + + def __init__(self, file_path, tokenizer, block_size): + self.examples = [] + self.tokenizer = tokenizer + self.block_size = block_size + + # 读取并处理数据 + logger.info(f"正在读取数据文件: {file_path}") + with open(file_path, encoding="utf-8") as f: + text = f.read() + + # 分割为代码样本 + code_samples = text.split("# ---NEW SAMPLE---") + + for code in 
code_samples: + if len(code.strip()) > 0: + tokenized_code = self.tokenizer.encode(code.strip()) + self.examples.extend(self._get_chunks(tokenized_code)) + + def _get_chunks(self, tokenized_code): + chunks = [] + for i in range(0, len(tokenized_code), self.block_size): + chunk = tokenized_code[i:i + self.block_size] + if len(chunk) == self.block_size: + chunks.append({"input_ids": chunk}) + return chunks + + def __len__(self): + return len(self.examples) + + def __getitem__(self, idx): + return self.examples[idx] + +def main(): + parser = argparse.ArgumentParser(description="微调 DeepSeek Coder 模型") + + # 添加模型参数 + model_args_group = parser.add_argument_group("模型参数") + model_args_group.add_argument("--model_name_or_path", type=str, default="deepseek-ai/deepseek-coder-1.3b-base") + model_args_group.add_argument("--use_lora", action="store_true") + model_args_group.add_argument("--lora_rank", type=int, default=8) + model_args_group.add_argument("--lora_alpha", type=int, default=16) + model_args_group.add_argument("--lora_dropout", type=float, default=0.05) + + # 添加数据参数 + data_args_group = parser.add_argument_group("数据参数") + data_args_group.add_argument("--train_file", type=str, required=True) + data_args_group.add_argument("--validation_file", type=str) + data_args_group.add_argument("--max_seq_length", type=int, default=512) + data_args_group.add_argument("--block_size", type=int, default=None) + data_args_group.add_argument("--overwrite_cache", action="store_true") + data_args_group.add_argument("--preprocessing_num_workers", type=int, default=None) + + # 添加训练参数 + training_args_group = parser.add_argument_group("训练参数") + training_args_group.add_argument("--output_dir", type=str, required=True) + training_args_group.add_argument("--num_train_epochs", type=int, default=3) + training_args_group.add_argument("--per_device_train_batch_size", type=int, default=8) + training_args_group.add_argument("--per_device_eval_batch_size", type=int, default=8) + training_args_group.add_argument("--gradient_accumulation_steps", type=int, default=1) + training_args_group.add_argument("--learning_rate", type=float, default=5e-5) + training_args_group.add_argument("--weight_decay", type=float, default=0.01) + training_args_group.add_argument("--warmup_ratio", type=float, default=0.1) + training_args_group.add_argument("--logging_steps", type=int, default=10) + training_args_group.add_argument("--save_steps", type=int, default=500) + training_args_group.add_argument("--seed", type=int, default=42) + + args = parser.parse_args() + + # 设置随机种子 + set_seed(args.seed) + + # 加载模型和分词器 + logger.info(f"加载模型和分词器: {args.model_name_or_path}") + config = AutoConfig.from_pretrained(args.model_name_or_path) + tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) + model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path) + + # 如果使用LoRA进行参数高效微调 + if args.use_lora: + # 注意:这里需要实现LoRA的集成,这是一个简化版 + logger.info(f"使用LoRA进行参数高效微调,rank={args.lora_rank}, alpha={args.lora_alpha}") + # 这里应添加LoRA相关配置和实现 + + # 确定block_size + block_size = args.block_size + if block_size is None: + block_size = min(tokenizer.model_max_length, args.max_seq_length) + + # 准备数据集 + train_dataset = CodeDataset(args.train_file, tokenizer, block_size) + eval_dataset = None + if args.validation_file: + eval_dataset = CodeDataset(args.validation_file, tokenizer, block_size) + + # 数据整理器 + data_collator = DataCollatorForLanguageModeling( + tokenizer=tokenizer, + mlm=False, + ) + + # 优化器 + optimizer = AdamWeightDecay( + 
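+        # 优化器显式作用于模型全部可训练参数;学习率调度器在下方 Trainer 的 optimizers=(optimizer, None) 中留空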
params=model.trainable_params(), + learning_rate=args.learning_rate, + weight_decay=args.weight_decay + ) + + # 训练参数 + training_args = TrainingArguments( + output_dir=args.output_dir, + overwrite_output_dir=True, + num_train_epochs=args.num_train_epochs, + per_device_train_batch_size=args.per_device_train_batch_size, + per_device_eval_batch_size=args.per_device_eval_batch_size, + gradient_accumulation_steps=args.gradient_accumulation_steps, + learning_rate=args.learning_rate, + weight_decay=args.weight_decay, + warmup_ratio=args.warmup_ratio, + logging_steps=args.logging_steps, + save_steps=args.save_steps, + save_total_limit=2, + ) + + # 初始化Trainer + trainer = Trainer( + model=model, + args=training_args, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + data_collator=data_collator, + optimizers=(optimizer, None), # (optimizer, scheduler) + ) + + # 开始训练 + logger.info("开始微调") + trainer.train() + + # 保存模型 + logger.info(f"保存微调后的模型到 {args.output_dir}") + trainer.save_model() + tokenizer.save_pretrained(args.output_dir) + +if __name__ == "__main__": + # 设置日志级别 + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO, + ) + main() \ No newline at end of file diff --git a/examples/deepseek1.3b_code_generation/deepseek_coder_tutorial.ipynb b/examples/deepseek1.3b_code_generation/deepseek_coder_tutorial.ipynb new file mode 100644 index 000000000..44a927780 --- /dev/null +++ b/examples/deepseek1.3b_code_generation/deepseek_coder_tutorial.ipynb @@ -0,0 +1,366 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# DeepSeek Coder 代码生成教程\n", + "\n", + "本教程展示如何使用 MindNLP 中的 DeepSeek Coder 模型进行代码生成。DeepSeek Coder 是一个优秀的代码生成模型,专门针对编程领域进行了训练,可以根据自然语言描述生成高质量代码。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. 安装依赖\n", + "\n", + "首先确保已安装 MindNLP:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. 
加载 DeepSeek Coder 模型\n", + "\n", + "我们使用 MindNLP 的 AutoModel 和 AutoTokenizer 类来加载 DeepSeek Coder 模型。" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "edd3e74102dd49e6b4fec8f3d4d19134", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0.00/281 [00:00 9\u001b[0m model \u001b[38;5;241m=\u001b[39m \u001b[43mAutoModelForCausalLM\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfrom_pretrained\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 10\u001b[0m \u001b[43m \u001b[49m\u001b[43mmodel_name\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 11\u001b[0m \u001b[43m \u001b[49m\u001b[43mfrom_pt\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# 关键参数\u001b[39;49;00m\n\u001b[1;32m 12\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/桌面/mindnlp/mindnlp/transformers/models/auto/auto_factory.py:510\u001b[0m, in \u001b[0;36m_BaseAutoModelClass.from_pretrained\u001b[0;34m(cls, pretrained_model_name_or_path, *model_args, **kwargs)\u001b[0m\n\u001b[1;32m 508\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mtype\u001b[39m(config) \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m_model_mapping\u001b[38;5;241m.\u001b[39mkeys():\n\u001b[1;32m 509\u001b[0m model_class \u001b[38;5;241m=\u001b[39m _get_model_class(config, \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m_model_mapping)\n\u001b[0;32m--> 510\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mmodel_class\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfrom_pretrained\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 511\u001b[0m \u001b[43m \u001b[49m\u001b[43mpretrained_model_name_or_path\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mmodel_args\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mhub_kwargs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\n\u001b[1;32m 512\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 513\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 514\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUnrecognized configuration class \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mconfig\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m for this kind of AutoModel: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 515\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mModel type should be one of \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m, \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;241m.\u001b[39mjoin(c\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mfor\u001b[39;00m\u001b[38;5;250m 
\u001b[39mc\u001b[38;5;250m \u001b[39m\u001b[38;5;129;01min\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m_model_mapping\u001b[38;5;241m.\u001b[39mkeys())\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 516\u001b[0m )\n", + "File \u001b[0;32m~/桌面/mindnlp/mindnlp/transformers/modeling_utils.py:2925\u001b[0m, in \u001b[0;36mPreTrainedModel.from_pretrained\u001b[0;34m(cls, pretrained_model_name_or_path, config, cache_dir, ignore_mismatched_sizes, force_download, local_files_only, token, revision, use_safetensors, *model_args, **kwargs)\u001b[0m\n\u001b[1;32m 2923\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m from_pt:\n\u001b[1;32m 2924\u001b[0m filename \u001b[38;5;241m=\u001b[39m _add_variant(PT_WEIGHTS_NAME, variant)\n\u001b[0;32m-> 2925\u001b[0m resolved_archive_file \u001b[38;5;241m=\u001b[39m \u001b[43mcached_file\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2926\u001b[0m \u001b[43m \u001b[49m\u001b[43mpretrained_model_name_or_path\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfilename\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mcached_file_kwargs\u001b[49m\n\u001b[1;32m 2927\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2928\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 2929\u001b[0m \u001b[38;5;66;03m# This repo has no safetensors file of any kind, we switch to PyTorch.\u001b[39;00m\n\u001b[1;32m 2930\u001b[0m filename \u001b[38;5;241m=\u001b[39m _add_variant(WEIGHTS_NAME, variant)\n", + "File \u001b[0;32m~/桌面/mindnlp/mindnlp/utils/download.py:527\u001b[0m, in \u001b[0;36mcached_file\u001b[0;34m(path_or_repo_id, filename, cache_dir, force_download, resume_download, proxies, local_files_only, revision, token, subfolder, mirror, repo_type, user_agent, _raise_exceptions_for_gated_repo, _raise_exceptions_for_missing_entries, _raise_exceptions_for_connection_errors, _commit_hash)\u001b[0m\n\u001b[1;32m 524\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mEnvironmentError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCould not locate \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mfull_filename\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m inside \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mpath_or_repo_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 525\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 526\u001b[0m \u001b[38;5;66;03m# Load from URL or cache if already cached\u001b[39;00m\n\u001b[0;32m--> 527\u001b[0m resolved_file \u001b[38;5;241m=\u001b[39m \u001b[43mdownload\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 528\u001b[0m \u001b[43m \u001b[49m\u001b[43mpath_or_repo_id\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 529\u001b[0m \u001b[43m \u001b[49m\u001b[43mfilename\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 530\u001b[0m \u001b[43m \u001b[49m\u001b[43msubfolder\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mlen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43msubfolder\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m==\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01melse\u001b[39;49;00m\u001b[43m 
\u001b[49m\u001b[43msubfolder\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 531\u001b[0m \u001b[43m \u001b[49m\u001b[43mrepo_type\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrepo_type\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 532\u001b[0m \u001b[43m \u001b[49m\u001b[43mcache_dir\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcache_dir\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 533\u001b[0m \u001b[43m \u001b[49m\u001b[43muser_agent\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43muser_agent\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 534\u001b[0m \u001b[43m \u001b[49m\u001b[43mforce_download\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mforce_download\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 535\u001b[0m \u001b[43m \u001b[49m\u001b[43mproxies\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mproxies\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 536\u001b[0m \u001b[43m \u001b[49m\u001b[43mresume_download\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mresume_download\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 537\u001b[0m \u001b[43m \u001b[49m\u001b[43mlocal_files_only\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlocal_files_only\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 538\u001b[0m \u001b[43m \u001b[49m\u001b[43mrevision\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrevision\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 539\u001b[0m \u001b[43m \u001b[49m\u001b[43mtoken\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtoken\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 540\u001b[0m \u001b[43m \u001b[49m\u001b[43mmirror\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmirror\u001b[49m\n\u001b[1;32m 541\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 542\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m GatedRepoError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 543\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m _raise_exceptions_for_missing_entries:\n", + "File \u001b[0;32m~/桌面/mindnlp/mindnlp/utils/download.py:651\u001b[0m, in \u001b[0;36mdownload\u001b[0;34m(repo_id, filename, subfolder, repo_type, cache_dir, local_dir, user_agent, force_download, proxies, resume_download, local_files_only, revision, token, mirror)\u001b[0m\n\u001b[1;32m 649\u001b[0m headers \u001b[38;5;241m=\u001b[39m {}\n\u001b[1;32m 650\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 651\u001b[0m pointer_path \u001b[38;5;241m=\u001b[39m \u001b[43mthreads_exclusive_http_get\u001b[49m\u001b[43m(\u001b[49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstorage_folder\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdownload_file_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrelative_filename\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mproxies\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mproxies\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mheaders\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mheaders\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 652\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m exp:\n\u001b[1;32m 653\u001b[0m \u001b[38;5;66;03m# Otherwise, our Internet connection is down.\u001b[39;00m\n\u001b[1;32m 654\u001b[0m \u001b[38;5;66;03m# etag is None\u001b[39;00m\n\u001b[1;32m 655\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m exp\n", + "File \u001b[0;32m~/桌面/mindnlp/mindnlp/utils/download.py:157\u001b[0m, in 
\u001b[0;36mthreads_exclusive_http_get\u001b[0;34m(url, storage_folder, md5sum, download_file_name, proxies, headers)\u001b[0m\n\u001b[1;32m 155\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 156\u001b[0m fcntl\u001b[38;5;241m.\u001b[39mflock(fd, fcntl\u001b[38;5;241m.\u001b[39mLOCK_EX)\n\u001b[0;32m--> 157\u001b[0m file_path \u001b[38;5;241m=\u001b[39m \u001b[43mhttp_get\u001b[49m\u001b[43m(\u001b[49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpath\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstorage_folder\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdownload_file_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_file_name\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mproxies\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mproxies\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mheaders\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mheaders\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 158\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m file_path\n\u001b[1;32m 159\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m exp:\n", + "File \u001b[0;32m~/桌面/mindnlp/mindnlp/utils/download.py:244\u001b[0m, in \u001b[0;36mhttp_get\u001b[0;34m(url, path, md5sum, download_file_name, proxies, headers)\u001b[0m\n\u001b[1;32m 240\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mopen\u001b[39m(tmp_file_path, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mab\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m file_size \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m0\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mwb\u001b[39m\u001b[38;5;124m\"\u001b[39m) \u001b[38;5;28;01mas\u001b[39;00m file:\n\u001b[1;32m 241\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m tqdm(\n\u001b[1;32m 242\u001b[0m total\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mint\u001b[39m(total_size), unit\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mB\u001b[39m\u001b[38;5;124m\"\u001b[39m, initial\u001b[38;5;241m=\u001b[39mfile_size, unit_scale\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m, unit_divisor\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1024\u001b[39m\n\u001b[1;32m 243\u001b[0m ) \u001b[38;5;28;01mas\u001b[39;00m pbar:\n\u001b[0;32m--> 244\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m chunk \u001b[38;5;129;01min\u001b[39;00m req\u001b[38;5;241m.\u001b[39miter_content(chunk_size\u001b[38;5;241m=\u001b[39mchunk_size):\n\u001b[1;32m 245\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m chunk:\n\u001b[1;32m 246\u001b[0m file\u001b[38;5;241m.\u001b[39mwrite(chunk)\n", + "File \u001b[0;32m~/anaconda3/envs/mindspore_py39/lib/python3.9/site-packages/requests/models.py:820\u001b[0m, in \u001b[0;36mResponse.iter_content..generate\u001b[0;34m()\u001b[0m\n\u001b[1;32m 818\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mhasattr\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mraw, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mstream\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[1;32m 819\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 820\u001b[0m \u001b[38;5;28;01myield from\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mraw\u001b[38;5;241m.\u001b[39mstream(chunk_size, decode_content\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[1;32m 821\u001b[0m 
\u001b[38;5;28;01mexcept\u001b[39;00m ProtocolError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 822\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m ChunkedEncodingError(e)\n", + "File \u001b[0;32m~/anaconda3/envs/mindspore_py39/lib/python3.9/site-packages/urllib3/response.py:1066\u001b[0m, in \u001b[0;36mHTTPResponse.stream\u001b[0;34m(self, amt, decode_content)\u001b[0m\n\u001b[1;32m 1064\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 1065\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m is_fp_closed(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_fp) \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_decoded_buffer) \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[0;32m-> 1066\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[43mamt\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mamt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdecode_content\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdecode_content\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1068\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m data:\n\u001b[1;32m 1069\u001b[0m \u001b[38;5;28;01myield\u001b[39;00m data\n", + "File \u001b[0;32m~/anaconda3/envs/mindspore_py39/lib/python3.9/site-packages/urllib3/response.py:955\u001b[0m, in \u001b[0;36mHTTPResponse.read\u001b[0;34m(self, amt, decode_content, cache_content)\u001b[0m\n\u001b[1;32m 952\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_decoded_buffer) \u001b[38;5;241m>\u001b[39m\u001b[38;5;241m=\u001b[39m amt:\n\u001b[1;32m 953\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_decoded_buffer\u001b[38;5;241m.\u001b[39mget(amt)\n\u001b[0;32m--> 955\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_raw_read\u001b[49m\u001b[43m(\u001b[49m\u001b[43mamt\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 957\u001b[0m flush_decoder \u001b[38;5;241m=\u001b[39m amt \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mor\u001b[39;00m (amt \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m0\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m data)\n\u001b[1;32m 959\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m data \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_decoded_buffer) \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m0\u001b[39m:\n", + "File \u001b[0;32m~/anaconda3/envs/mindspore_py39/lib/python3.9/site-packages/urllib3/response.py:879\u001b[0m, in \u001b[0;36mHTTPResponse._raw_read\u001b[0;34m(self, amt, read1)\u001b[0m\n\u001b[1;32m 876\u001b[0m fp_closed \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mgetattr\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_fp, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mclosed\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mFalse\u001b[39;00m)\n\u001b[1;32m 878\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_error_catcher():\n\u001b[0;32m--> 879\u001b[0m data \u001b[38;5;241m=\u001b[39m 
\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_fp_read\u001b[49m\u001b[43m(\u001b[49m\u001b[43mamt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mread1\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mread1\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m fp_closed \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;124mb\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 880\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m amt \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m amt \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m0\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m data:\n\u001b[1;32m 881\u001b[0m \u001b[38;5;66;03m# Platform-specific: Buggy versions of Python.\u001b[39;00m\n\u001b[1;32m 882\u001b[0m \u001b[38;5;66;03m# Close the connection when no data is returned\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 887\u001b[0m \u001b[38;5;66;03m# not properly close the connection in all cases. There is\u001b[39;00m\n\u001b[1;32m 888\u001b[0m \u001b[38;5;66;03m# no harm in redundantly calling close.\u001b[39;00m\n\u001b[1;32m 889\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_fp\u001b[38;5;241m.\u001b[39mclose()\n", + "File \u001b[0;32m~/anaconda3/envs/mindspore_py39/lib/python3.9/site-packages/urllib3/response.py:862\u001b[0m, in \u001b[0;36mHTTPResponse._fp_read\u001b[0;34m(self, amt, read1)\u001b[0m\n\u001b[1;32m 859\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_fp\u001b[38;5;241m.\u001b[39mread1(amt) \u001b[38;5;28;01mif\u001b[39;00m amt \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_fp\u001b[38;5;241m.\u001b[39mread1()\n\u001b[1;32m 860\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 861\u001b[0m \u001b[38;5;66;03m# StringIO doesn't like amt=None\u001b[39;00m\n\u001b[0;32m--> 862\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_fp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[43mamt\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mif\u001b[39;00m amt \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_fp\u001b[38;5;241m.\u001b[39mread()\n", + "File \u001b[0;32m~/anaconda3/envs/mindspore_py39/lib/python3.9/http/client.py:463\u001b[0m, in \u001b[0;36mHTTPResponse.read\u001b[0;34m(self, amt)\u001b[0m\n\u001b[1;32m 460\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m amt \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 461\u001b[0m \u001b[38;5;66;03m# Amount is given, implement using readinto\u001b[39;00m\n\u001b[1;32m 462\u001b[0m b \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mbytearray\u001b[39m(amt)\n\u001b[0;32m--> 463\u001b[0m n \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mreadinto\u001b[49m\u001b[43m(\u001b[49m\u001b[43mb\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 464\u001b[0m 
\u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mmemoryview\u001b[39m(b)[:n]\u001b[38;5;241m.\u001b[39mtobytes()\n\u001b[1;32m 465\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 466\u001b[0m \u001b[38;5;66;03m# Amount is not given (unbounded read) so we must check self.length\u001b[39;00m\n\u001b[1;32m 467\u001b[0m \u001b[38;5;66;03m# and self.chunked\u001b[39;00m\n", + "File \u001b[0;32m~/anaconda3/envs/mindspore_py39/lib/python3.9/http/client.py:507\u001b[0m, in \u001b[0;36mHTTPResponse.readinto\u001b[0;34m(self, b)\u001b[0m\n\u001b[1;32m 502\u001b[0m b \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mmemoryview\u001b[39m(b)[\u001b[38;5;241m0\u001b[39m:\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlength]\n\u001b[1;32m 504\u001b[0m \u001b[38;5;66;03m# we do not use _safe_read() here because this may be a .will_close\u001b[39;00m\n\u001b[1;32m 505\u001b[0m \u001b[38;5;66;03m# connection, and the user is reading more bytes than will be provided\u001b[39;00m\n\u001b[1;32m 506\u001b[0m \u001b[38;5;66;03m# (for example, reading in 1k chunks)\u001b[39;00m\n\u001b[0;32m--> 507\u001b[0m n \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mreadinto\u001b[49m\u001b[43m(\u001b[49m\u001b[43mb\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 508\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m n \u001b[38;5;129;01mand\u001b[39;00m b:\n\u001b[1;32m 509\u001b[0m \u001b[38;5;66;03m# Ideally, we would raise IncompleteRead if the content-length\u001b[39;00m\n\u001b[1;32m 510\u001b[0m \u001b[38;5;66;03m# wasn't satisfied, but it might break compatibility.\u001b[39;00m\n\u001b[1;32m 511\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_close_conn()\n", + "File \u001b[0;32m~/anaconda3/envs/mindspore_py39/lib/python3.9/socket.py:704\u001b[0m, in \u001b[0;36mSocketIO.readinto\u001b[0;34m(self, b)\u001b[0m\n\u001b[1;32m 702\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[1;32m 703\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 704\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_sock\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrecv_into\u001b[49m\u001b[43m(\u001b[49m\u001b[43mb\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 705\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m timeout:\n\u001b[1;32m 706\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_timeout_occurred \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n", + "File \u001b[0;32m~/anaconda3/envs/mindspore_py39/lib/python3.9/ssl.py:1275\u001b[0m, in \u001b[0;36mSSLSocket.recv_into\u001b[0;34m(self, buffer, nbytes, flags)\u001b[0m\n\u001b[1;32m 1271\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m flags \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[1;32m 1272\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 1273\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mnon-zero flags not allowed in calls to recv_into() on \u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m%\u001b[39m\n\u001b[1;32m 1274\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m)\n\u001b[0;32m-> 1275\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m 
\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnbytes\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbuffer\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1276\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 1277\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28msuper\u001b[39m()\u001b[38;5;241m.\u001b[39mrecv_into(buffer, nbytes, flags)\n", + "File \u001b[0;32m~/anaconda3/envs/mindspore_py39/lib/python3.9/ssl.py:1133\u001b[0m, in \u001b[0;36mSSLSocket.read\u001b[0;34m(self, len, buffer)\u001b[0m\n\u001b[1;32m 1131\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1132\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m buffer \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m-> 1133\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_sslobj\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mlen\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbuffer\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1134\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 1135\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_sslobj\u001b[38;5;241m.\u001b[39mread(\u001b[38;5;28mlen\u001b[39m)\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: " + ] + } + ], + "source": [ + "import sys\n", + "sys.path.append(\"C:/Users/25904/Desktop/新建文件夹 (2)/mindnlp\")\n", + "from mindnlp.transformers import AutoModelForCausalLM, AutoTokenizer\n", + "import mindspore\n", + "# 加载tokenizer和模型\n", + "model_name = \"deepseek-ai/deepseek-coder-1.3b-base\"\n", + "tokenizer = AutoTokenizer.from_pretrained(model_name)\n", + " # 修改加载代码\n", + "model = AutoModelForCausalLM.from_pretrained(\n", + " model_name,\n", + " from_pt=True # 关键参数\n", + " )\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. 基础代码生成\n", + "\n", + "让我们首先尝试一个简单的代码生成示例:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_code(prompt, max_length=500, temperature=0.7, top_p=0.95, top_k=50):\n", + " # 添加前缀以获得更好的代码生成效果\n", + " if not prompt.startswith(\"```\"):\n", + " if \"python\" in prompt.lower():\n", + " prompt = f\"```python\\n# {prompt}\\n\"\n", + " else:\n", + " prompt = f\"```python\\n# {prompt}\\n\"\n", + " \n", + " inputs = tokenizer(prompt, return_tensors=\"ms\")\n", + " \n", + " generated_ids = model.generate(\n", + " inputs.input_ids,\n", + " max_length=max_length,\n", + " temperature=temperature,\n", + " top_p=top_p,\n", + " top_k=top_k,\n", + " do_sample=True,\n", + " )\n", + " \n", + " generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)\n", + " return generated_text\n", + "\n", + "# 尝试生成一个简单的Python函数\n", + "prompt = \"编写一个计算斐波那契数列的函数\"\n", + "generated_code = generate_code(prompt)\n", + "print(generated_code)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4. 
高级代码生成示例\n", + "\n", + "下面我们来尝试一些更复杂的代码生成任务:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 实现一个数据结构\n", + "prompt = \"实现一个二叉搜索树的Python类,包括插入、查找和删除操作\"\n", + "bst_code = generate_code(prompt, max_length=800)\n", + "print(bst_code)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 数据处理任务\n", + "prompt = \"编写一个函数,读取CSV文件并使用pandas进行数据清洗和分析\"\n", + "data_analysis_code = generate_code(prompt, max_length=700)\n", + "print(data_analysis_code)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 5. 调整生成参数\n", + "\n", + "我们可以通过调整生成参数来控制代码生成的多样性和质量:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 使用较低的temperature获得更确定性的结果\n", + "prompt = \"编写一个实现快速排序的Python函数\"\n", + "deterministic_code = generate_code(prompt, temperature=0.2)\n", + "print(\"温度=0.2的生成结果:\")\n", + "print(deterministic_code)\n", + "\n", + "# 使用较高的temperature获得更多样化的结果\n", + "creative_code = generate_code(prompt, temperature=1.0)\n", + "print(\"\\n温度=1.0的生成结果:\")\n", + "print(creative_code)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 6. 提取生成的代码\n", + "\n", + "通常,我们需要从生成的文本中提取纯代码部分:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def extract_code(generated_text):\n", + " \"\"\"从生成的文本中提取代码部分\"\"\"\n", + " if \"```\" in generated_text:\n", + " code_start = generated_text.find(\"```\") + 3\n", + " language_end = generated_text.find(\"\\n\", code_start)\n", + " code_end = generated_text.find(\"```\", language_end)\n", + " if code_end == -1: # 如果没有结束的```\n", + " code = generated_text[language_end+1:]\n", + " else:\n", + " code = generated_text[language_end+1:code_end].strip()\n", + " return code\n", + " return generated_text\n", + "\n", + "prompt = \"实现一个计算两个日期之间天数的Python函数\"\n", + "generated_text = generate_code(prompt)\n", + "print(\"原始生成文本:\")\n", + "print(\"-\" * 50)\n", + "print(generated_text)\n", + "print(\"-\" * 50)\n", + "\n", + "code = extract_code(generated_text)\n", + "print(\"\\n提取的纯代码:\")\n", + "print(\"-\" * 50)\n", + "print(code)\n", + "print(\"-\" * 50)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 7. 代码生成应用示例\n", + "\n", + "最后,我们来看一个实际的应用示例 - 自动生成一个简单的Web应用:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "prompt = \"\"\"\n", + "使用Flask创建一个简单的API服务,包含以下功能:\n", + "1. 提供一个GET /health端点,返回服务健康状态\n", + "2. 提供一个POST /api/predict端点,接收JSON数据,包含一个\"text\"字段\n", + "3. 返回文本的长度和单词数量\n", + "\"\"\"\n", + "\n", + "flask_app_code = generate_code(prompt, max_length=1000)\n", + "extracted_code = extract_code(flask_app_code)\n", + "print(extracted_code)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 8. 
总结\n", + "\n", + "在本教程中,我们学习了如何使用MindNLP中的DeepSeek Coder模型进行代码生成。我们探索了:\n", + "\n", + "- 加载和使用DeepSeek Coder模型\n", + "- 为不同复杂度的任务生成代码\n", + "- 调整生成参数控制代码质量和多样性\n", + "- 从生成的文本中提取纯代码\n", + "- 实际应用示例\n", + "\n", + "DeepSeek Coder是一个强大的代码生成工具,可以帮助开发者提高编程效率,尤其适合解决标准编程任务和快速原型开发。" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "mindspore_py39", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.19" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/mindnlp/transformers/models/deepseek_coder/__init__.py b/mindnlp/transformers/models/deepseek_coder/__init__.py new file mode 100644 index 000000000..e798261b1 --- /dev/null +++ b/mindnlp/transformers/models/deepseek_coder/__init__.py @@ -0,0 +1,26 @@ +# Copyright 2024 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +DeepSeek Coder Model init +""" +from . import modeling_deepseek_coder, configuration_deepseek_coder, tokenization_deepseek_coder_fast +from .modeling_deepseek_coder import * +from .configuration_deepseek_coder import * +from .tokenization_deepseek_coder_fast import * + +__all__ = [] +__all__.extend(modeling_deepseek_coder.__all__) +__all__.extend(configuration_deepseek_coder.__all__) +__all__.extend(tokenization_deepseek_coder_fast.__all__) \ No newline at end of file diff --git a/mindnlp/transformers/models/deepseek_coder/configuration_deepseek_coder.py b/mindnlp/transformers/models/deepseek_coder/configuration_deepseek_coder.py new file mode 100644 index 000000000..43dcc575b --- /dev/null +++ b/mindnlp/transformers/models/deepseek_coder/configuration_deepseek_coder.py @@ -0,0 +1,94 @@ +# coding=utf-8 +# Copyright 2024 Huawei Technologies Co., Ltd +# Copyright 2023 DeepSeek-AI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""DeepSeek Coder model configuration""" + +from mindnlp.utils import logging +from mindnlp.transformers.configuration_utils import PretrainedConfig + +logger = logging.get_logger(__name__) + +DEEPSEEK_CODER_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "deepseek-ai/deepseek-coder-1.3b-base": "https://huggingface.co/deepseek-ai/deepseek-coder-1.3b-base/resolve/main/config.json", +} + + +class DeepseekCoderConfig(PretrainedConfig): + """ + This is the configuration class to store the configuration of a [`DeepseekCoderModel`]. It is used to instantiate an DeepSeekCoder + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the DeepSeek Coder 1.3B base model. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information.""" + + model_type = "deepseek_coder" + keys_to_ignore_at_inference = ["past_key_values"] + + def __init__( + self, + vocab_size=32000, + hidden_size=2048, + intermediate_size=5504, + num_hidden_layers=24, + num_attention_heads=16, + num_key_value_heads=16, + hidden_act="silu", + max_position_embeddings=16384, + initializer_range=0.02, + rms_norm_eps=1e-6, + use_cache=True, + pad_token_id=None, + bos_token_id=1, + eos_token_id=2, + pretraining_tp=1, + tie_word_embeddings=False, + rope_theta=10000.0, + rope_scaling=None, + attention_bias=False, + attention_dropout=0.0, + **kwargs, + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + # for backward compatibility + if num_key_value_heads is None: + num_key_value_heads = num_attention_heads + + self.num_key_value_heads = num_key_value_heads + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.rms_norm_eps = rms_norm_eps + self.pretraining_tp = pretraining_tp + self.use_cache = use_cache + self.rope_theta = rope_theta + self.rope_scaling = rope_scaling + self.attention_bias = attention_bias + self.attention_dropout = attention_dropout + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) + + +__all__ = ["DeepseekCoderConfig"] \ No newline at end of file diff --git a/mindnlp/transformers/models/deepseek_coder/modeling_deepseek_coder.py b/mindnlp/transformers/models/deepseek_coder/modeling_deepseek_coder.py new file mode 100644 index 000000000..ac9d77fce --- /dev/null +++ b/mindnlp/transformers/models/deepseek_coder/modeling_deepseek_coder.py @@ -0,0 +1,1587 @@ +# coding=utf-8 +# Copyright 2024 Huawei Technologies Co., Ltd +# Copyright 2023 DeepSeek-AI and The HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" MindNLP DeepSeek Coder model.""" +import math +import warnings +from typing import List, Optional, Tuple, Union +import mindspore +from mindnlp.core import nn, ops +from mindnlp.core.nn import functional as F +from mindnlp.common.activations import ACT2FN +from mindnlp.transformers.cache_utils import Cache, DynamicCache +from mindnlp.transformers.modeling_attn_mask_utils import ( + _prepare_4d_causal_attention_mask +) +from mindnlp.transformers.modeling_outputs import ( + BaseModelOutputWithPast, + CausalLMOutputWithPast, + SequenceClassifierOutputWithPast, +) + +from mindnlp.transformers.modeling_utils import PreTrainedModel +from mindnlp.transformers.ms_utils import ( + ALL_LAYERNORM_LAYERS, +) + +from mindnlp.utils import ( + logging, +) + +from .configuration_deepseek_coder import DeepseekCoderConfig + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "DeepseekCoderConfig" + + +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=mindspore.int32) + indices = ops.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad( + ops.cumsum(seqlens_in_batch, dim=0, dtype=mindspore.int32), (1, 0) + ) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + +class DeepseekCoderRMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + DeepseekCoderRMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(ops.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(mindspore.float32) + variance = hidden_states.pow(2).mean(-1, keep_dims=True) + hidden_states = hidden_states * ops.rsqrt(variance + self.variance_epsilon) + return self.weight * hidden_states.to(input_dtype) + + +ALL_LAYERNORM_LAYERS.append(DeepseekCoderRMSNorm) + + +class DeepseekCoderRotaryEmbedding(nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000): + super().__init__() + + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + inv_freq = 1.0 / ( + self.base ** (ops.arange(0, self.dim, 2).float() / self.dim) + ) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + # Build here to make `torch.jit.trace` work. 
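        # inv_freq holds one inverse frequency per channel pair, 1 / base^(2i/dim).
        # The call below precomputes cos/sin lookup tables up to
        # max_position_embeddings; forward() rebuilds them whenever the cache is
        # unset or a longer seq_len is requested, and returns the first seq_len
        # rows cast to the input dtype.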
+ self._set_cos_sin_cache( + seq_len=max_position_embeddings, + dtype=ops.get_default_dtype(), + ) + self.max_seq_len_cached = None + + def _set_cos_sin_cache(self, seq_len, dtype): + self.max_seq_len_cached = seq_len + t = ops.arange( + self.max_seq_len_cached, dtype=self.inv_freq.dtype + ) + + freqs = ops.outer(t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = ops.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if self.max_seq_len_cached is None or seq_len > self.max_seq_len_cached: + self._set_cos_sin_cache(seq_len=seq_len, dtype=x.dtype) + + return ( + self.cos_cached[:seq_len].to(dtype=x.dtype), + self.sin_cached[:seq_len].to(dtype=x.dtype), + ) + + +# Copied from transformers.models.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->DeepseekCoder +class DeepseekCoderLinearScalingRotaryEmbedding(DeepseekCoderRotaryEmbedding): + """DeepseekCoderRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev""" + + def __init__( + self, + dim, + max_position_embeddings=2048, + base=10000, + scaling_factor=1.0, + ): + self.scaling_factor = scaling_factor + super().__init__(dim, max_position_embeddings, base) + + def _set_cos_sin_cache(self, seq_len, dtype): + self.max_seq_len_cached = seq_len + t = ops.arange( + self.max_seq_len_cached, dtype=self.inv_freq.dtype + ) + t = t / self.scaling_factor + + freqs = ops.outer(t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = ops.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + +# Copied from transformers.models.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->DeepseekCoder +class DeepseekCoderDynamicNTKScalingRotaryEmbedding(DeepseekCoderRotaryEmbedding): + """DeepseekCoderRotaryEmbedding extended with Dynamic NTK scaling. 
Credits to the Reddit users /u/bloc97 and /u/emozilla""" + + def __init__( + self, + dim, + max_position_embeddings=2048, + base=10000, + scaling_factor=1.0, + ): + self.scaling_factor = scaling_factor + super().__init__(dim, max_position_embeddings, base) + + def _set_cos_sin_cache(self, seq_len, dtype): + self.max_seq_len_cached = seq_len + + if seq_len > self.max_position_embeddings: + base = self.base * ( + (self.scaling_factor * seq_len / self.max_position_embeddings) + - (self.scaling_factor - 1) + ) ** (self.dim / (self.dim - 2)) + inv_freq = 1.0 / ( + base ** (ops.arange(0, self.dim, 2).float() / self.dim) + ) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + t = ops.arange( + self.max_seq_len_cached, dtype=self.inv_freq.dtype + ) + + freqs = ops.outer(t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = ops.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + +# Inverse dim formula to find dim based on number of rotations +def yarn_find_correction_dim( + num_rotations, dim, base=10000, max_position_embeddings=2048 +): + return (dim * math.log(max_position_embeddings / (num_rotations * 2 * math.pi))) / ( + 2 * math.log(base) + ) + + +# Find dim range bounds based on rotations +def yarn_find_correction_range( + low_rot, high_rot, dim, base=10000, max_position_embeddings=2048 +): + low = math.floor( + yarn_find_correction_dim(low_rot, dim, base, max_position_embeddings) + ) + high = math.ceil( + yarn_find_correction_dim(high_rot, dim, base, max_position_embeddings) + ) + return max(low, 0), min(high, dim - 1) # Clamp values just in case + + +def yarn_get_mscale(scale=1, mscale=1): + if scale <= 1: + return 1.0 + return 0.1 * mscale * math.log(scale) + 1.0 + + +def yarn_linear_ramp_mask(min, max, dim): + if min == max: + max += 0.001 # Prevent singularity + + linear_func = (ops.arange(dim, dtype=mindspore.float32) - min) / (max - min) + ramp_func = ops.clamp(linear_func, 0, 1) + return ramp_func + + +class DeepseekCoderYarnRotaryEmbedding(DeepseekCoderRotaryEmbedding): + + def __init__( + self, + dim, + max_position_embeddings=2048, + base=10000, + scaling_factor=1.0, + original_max_position_embeddings=4096, + beta_fast=32, + beta_slow=1, + mscale=1, + mscale_all_dim=0, + ): + self.scaling_factor = scaling_factor + self.original_max_position_embeddings = original_max_position_embeddings + self.beta_fast = beta_fast + self.beta_slow = beta_slow + self.mscale = mscale + self.mscale_all_dim = mscale_all_dim + super().__init__(dim, max_position_embeddings, base) + + def _set_cos_sin_cache(self, seq_len, dtype): + self.max_seq_len_cached = seq_len + dim = self.dim + + freq_extra = 1.0 / ( + self.base + ** (ops.arange(0, dim, 2, dtype=mindspore.float32) / dim) + ) + freq_inter = 1.0 / ( + self.scaling_factor + * self.base + ** (ops.arange(0, dim, 2, dtype=mindspore.float32) / dim) + ) + + low, high = yarn_find_correction_range( + self.beta_fast, + self.beta_slow, + dim, + self.base, + self.original_max_position_embeddings, + ) + inv_freq_mask = 1.0 - yarn_linear_ramp_mask(low, high, dim // 2).to(dtype=mindspore.float32) + inv_freq = freq_inter * (1 - inv_freq_mask) + freq_extra * inv_freq_mask + self.register_buffer("inv_freq", inv_freq, persistent=False) + + t = ops.arange(seq_len, dtype=mindspore.float32) + + freqs = ops.outer(t, inv_freq) + + _mscale = float( + 
yarn_get_mscale(self.scaling_factor, self.mscale) + / yarn_get_mscale(self.scaling_factor, self.mscale_all_dim) + ) + + emb = ops.cat((freqs, freqs), dim=-1) + self.register_buffer( + "cos_cached", (emb.cos() * _mscale).to(dtype), persistent=False + ) + self.register_buffer( + "sin_cached", (emb.sin() * _mscale).to(dtype), persistent=False + ) + + +# Copied from transformers.models.llama.modeling_llama.rotate_half +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return ops.cat((-x2, x1), dim=-1) + + +# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb +def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`mindspore.Tensor`): The query tensor. + k (`mindspore.Tensor`): The key tensor. + cos (`mindspore.Tensor`): The cosine part of the rotary embedding. + sin (`mindspore.Tensor`): The sine part of the rotary embedding. + position_ids (`mindspore.Tensor`): + The position indices of the tokens corresponding to the query and key tensors. For example, this can be + used to pass offsetted position ids when working with a KV-cache. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(mindspore.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
+ """ + cos = cos[position_ids].unsqueeze(unsqueeze_dim) + sin = sin[position_ids].unsqueeze(unsqueeze_dim) + + b, h, s, d = q.shape + q = ops.transpose(q.view(b, h, s, d // 2, 2), 4, 3).reshape(b, h, s, d) + + b, h, s, d = k.shape + k = ops.transpose(k.view(b, h, s, d // 2, 2), 4, 3).reshape(b, h, s, d) + + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +class DeepseekCoderMLP(nn.Module): + def __init__(self, config, hidden_size=None, intermediate_size=None): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size if hidden_size is None else hidden_size + self.intermediate_size = ( + config.intermediate_size if intermediate_size is None else intermediate_size + ) + + self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) + self.act_fn = ACT2FN[config.hidden_act] + + def forward(self, x): + down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + return down_proj + + +class MoEGate(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.top_k = config.num_experts_per_tok + self.n_routed_experts = config.n_routed_experts + self.routed_scaling_factor = config.routed_scaling_factor + self.scoring_func = config.scoring_func + self.alpha = config.aux_loss_alpha + self.seq_aux = config.seq_aux + self.topk_method = config.topk_method + self.n_group = config.n_group + self.topk_group = config.topk_group + + # topk selection algorithm + self.norm_topk_prob = config.norm_topk_prob + self.gating_dim = config.hidden_size + self.weight = nn.Parameter( + ops.empty((self.n_routed_experts, self.gating_dim)) + ) + self.reset_parameters() + + def reset_parameters(self) -> None: + + nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) + + def forward(self, hidden_states): + bsz, seq_len, h = hidden_states.shape + ### compute gating score + hidden_states = hidden_states.view(-1, h) + logits = F.linear( + hidden_states.type(mindspore.float32), self.weight.type(mindspore.float32), None + ) + if self.scoring_func == "softmax": + scores = ops.softmax(logits, dim=-1, dtype=mindspore.float32) + else: + raise NotImplementedError( + f"insupportable scoring function for MoE gating: {self.scoring_func}" + ) + + ### select top-k experts + if self.topk_method == "greedy": + topk_weight, topk_idx = ops.topk( + scores, k=self.top_k, dim=-1, sorted=False + ) + elif self.topk_method == "group_limited_greedy": + group_scores = ( + scores.view(bsz * seq_len, self.n_group, -1).max(dim=-1).values + ) # [n, n_group] + group_idx = ops.topk( + group_scores, k=self.topk_group, dim=-1, sorted=False + )[ + 1 + ] # [n, top_k_group] + group_mask = ops.zeros_like(group_scores) # [n, n_group] + group_mask.scatter_(1, group_idx, 1) # [n, n_group] + score_mask = ( + group_mask.unsqueeze(-1) + .broadcast_to(( + bsz * seq_len, self.n_group, self.n_routed_experts // self.n_group + )) + .reshape(bsz * seq_len, -1) + ) # [n, e] + tmp_scores = scores.masked_fill(~score_mask.bool(), 0.0) # [n, e] + topk_weight, topk_idx = ops.topk( + tmp_scores, k=self.top_k, dim=-1, sorted=False + ) + + ### norm gate to sum 1 + if self.top_k > 1 and self.norm_topk_prob: + denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20 + topk_weight = topk_weight / denominator + else: + topk_weight = topk_weight * 
self.routed_scaling_factor + ### expert-level computation auxiliary loss + if self.training and self.alpha > 0.0: + scores_for_aux = scores + aux_topk = self.top_k + # always compute aux loss based on the naive greedy topk method + topk_idx_for_aux_loss = topk_idx.view(bsz, -1) + if self.seq_aux: + scores_for_seq_aux = scores_for_aux.view(bsz, seq_len, -1) + ce = ops.zeros( + bsz, self.n_routed_experts) + ce.scatter_add_( + 1, + topk_idx_for_aux_loss, + ops.ones(bsz, seq_len * aux_topk), + ).div_(seq_len * aux_topk / self.n_routed_experts) + aux_loss = (ce * scores_for_seq_aux.mean(dim=1)).sum( + dim=1 + ).mean() * self.alpha + else: + mask_ce = F.one_hot( + topk_idx_for_aux_loss.view(-1), num_classes=self.n_routed_experts + ) + ce = mask_ce.float().mean(0) + Pi = scores_for_aux.mean(0) + fi = ce * self.n_routed_experts + aux_loss = (Pi * fi).sum() * self.alpha + else: + aux_loss = None + return topk_idx, topk_weight, aux_loss + +# class AddAuxiliaryLoss(autograd.Function): +# """ +# The trick function of adding auxiliary (aux) loss, +# which includes the gradient of the aux loss during backpropagation. +# """ +# +# @staticmethod +# def forward(ctx, x, loss): +# assert loss.numel() == 1 +# ctx.dtype = loss.dtype +# ctx.required_aux_loss = loss.requires_grad +# return x +# +# @staticmethod +# def backward(ctx, grad_output): +# grad_loss = None +# if ctx.required_aux_loss: +# grad_loss = ops.ones(1, dtype=ctx.dtype) +# return grad_output, grad_loss + + +class DeepseekCoderMoE(nn.Module): + """ + A mixed expert module containing shared experts. + """ + + def __init__(self, config): + super().__init__() + self.config = config + self.num_experts_per_tok = config.num_experts_per_tok + + # if hasattr(config, "ep_size") and config.ep_size > 1: + # assert config.ep_size == dist.get_world_size() + # self.ep_size = config.ep_size + # self.experts_per_rank = config.n_routed_experts // config.ep_size + # self.ep_rank = dist.get_rank() + # self.experts = nn.ModuleList( + # [ + # ( + # DeepseekCoderMLP( + # config, intermediate_size=config.moe_intermediate_size + # ) + # if i >= self.ep_rank * self.experts_per_rank + # and i < (self.ep_rank + 1) * self.experts_per_rank + # else None + # ) + # for i in range(config.n_routed_experts) + # ] + # ) + # else: + self.ep_size = 1 + self.experts_per_rank = config.n_routed_experts + self.ep_rank = 0 + self.experts = nn.ModuleList( + [ + DeepseekCoderMLP( + config, intermediate_size=config.moe_intermediate_size + ) + for i in range(config.n_routed_experts) + ] + ) + self.gate = MoEGate(config) + if config.n_shared_experts is not None: + intermediate_size = config.moe_intermediate_size * config.n_shared_experts + self.shared_experts = DeepseekCoderMLP( + config=config, intermediate_size=intermediate_size + ) + + def forward(self, hidden_states): + identity = hidden_states + orig_shape = hidden_states.shape + topk_idx, topk_weight, aux_loss = self.gate(hidden_states) + hidden_states = hidden_states.view(-1, hidden_states.shape[-1]) + flat_topk_idx = topk_idx.view(-1) + # if self.training: + hidden_states = hidden_states.repeat_interleave( + self.num_experts_per_tok, dim=0 + ) + y = ops.empty(hidden_states.shape) + for i, expert in enumerate(self.experts): + y[flat_topk_idx == i] = expert(hidden_states[flat_topk_idx == i]) + # y = (y.view(*topk_weight.shape, -1) * topk_weight.unsqueeze(-1)).sum(dim=1) + y = ops.sum(y.view(*topk_weight.shape, -1) * topk_weight.unsqueeze(-1), dim=1) + y = y.to(hidden_states.dtype).view(*orig_shape) + # y = AddAuxiliaryLoss.apply(y, 
aux_loss) + # else: + # y = self.moe_infer(hidden_states, topk_idx, topk_weight).view(*orig_shape) + + if self.config.n_shared_experts is not None: + y = y + self.shared_experts(identity) + return y + + # @mindnlp.core.no_grad() + # def moe_infer(self, x, topk_ids, topk_weight): + # cnts = topk_ids.new_zeros((topk_ids.shape[0], len(self.experts))) + # cnts.scatter_(1, topk_ids, 1) + # tokens_per_expert = cnts.sum(dim=0) + # idxs = topk_ids.view(-1).argsort() + # sorted_tokens = x[idxs // topk_ids.shape[1]] + # sorted_tokens_shape = sorted_tokens.shape + # if self.ep_size > 1: + # tokens_per_ep_rank = tokens_per_expert.view(self.ep_size, -1).sum(dim=1) + # tokens_per_expert_group = tokens_per_expert.new_empty( + # tokens_per_expert.shape[0] + # ) + # dist.all_to_all_single(tokens_per_expert_group, tokens_per_expert) + # output_splits = ( + # tokens_per_expert_group.view(self.ep_size, -1) + # .sum(1) + # .cpu() + # .numpy() + # .tolist() + # ) + # gathered_tokens = sorted_tokens.new_empty( + # tokens_per_expert_group.sum(dim=0).cpu().item(), sorted_tokens.shape[1] + # ) + # input_split_sizes = tokens_per_ep_rank.cpu().numpy().tolist() + # dist.all_to_all( + # list(gathered_tokens.split(output_splits)), + # list(sorted_tokens.split(input_split_sizes)), + # ) + # tokens_per_expert_post_gather = tokens_per_expert_group.view( + # self.ep_size, self.experts_per_rank + # ).sum(dim=0) + # gatherd_idxs = np.zeros(shape=(gathered_tokens.shape[0],), dtype=np.int32) + # s = 0 + # for i, k in enumerate(tokens_per_expert_group.cpu().numpy()): + # gatherd_idxs[s : s + k] = i % self.experts_per_rank + # s += k + # gatherd_idxs = gatherd_idxs.argsort() + # sorted_tokens = gathered_tokens[gatherd_idxs] + # tokens_per_expert = tokens_per_expert_post_gather + # tokens_per_expert = tokens_per_expert.cpu().numpy() + # + # outputs = [] + # start_idx = 0 + # for i, num_tokens in enumerate(tokens_per_expert): + # end_idx = start_idx + num_tokens + # if num_tokens == 0: + # continue + # expert = self.experts[i + self.ep_rank * self.experts_per_rank] + # tokens_for_this_expert = sorted_tokens[start_idx:end_idx] + # expert_out = expert(tokens_for_this_expert) + # outputs.append(expert_out) + # start_idx = end_idx + # + # outs = ops.cat(outputs, dim=0) if len(outputs) else sorted_tokens.new_empty(0) + # if self.ep_size > 1: + # new_x = ops.empty(outs.shape) + # new_x[gatherd_idxs] = outs + # gathered_tokens = new_x.new_empty(*sorted_tokens_shape) + # dist.all_to_all( + # list(gathered_tokens.split(input_split_sizes)), + # list(new_x.split(output_splits)), + # ) + # outs = gathered_tokens + # + # new_x = ops.empty(outs.shape) + # new_x[idxs] = outs + # final_out = ( + # new_x.view(*topk_ids.shape, -1) + # .type(topk_weight.dtype) + # .mul_(topk_weight.unsqueeze(dim=-1)) + # .sum(dim=1) + # .type(new_x.dtype) + # ) + # return final_out + + +# Copied from transformers.models.llama.modeling_llama.repeat_kv +def repeat_kv(hidden_states: mindspore.Tensor, n_rep: int) -> mindspore.Tensor: + """ + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) + """ + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep == 1: + return hidden_states + hidden_states = hidden_states[:, :, None, :, :].broadcast_to(( + batch, num_key_value_heads, n_rep, slen, head_dim + )) + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + +# Copied from transformers.models.llama.modeling_llama.LlamaAttention with Llama->DeepseekCoder +class DeepseekCoderAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: DeepseekCoderConfig, layer_idx: Optional[int] = None): + super().__init__() + self.config = config + self.layer_idx = layer_idx + if layer_idx is None: + logger.warning( + f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will " + "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` " + "when creating this class." + ) + + self.attention_dropout = config.attention_dropout + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + + self.max_position_embeddings = config.max_position_embeddings + self.rope_theta = config.rope_theta + self.q_lora_rank = config.q_lora_rank + self.qk_rope_head_dim = config.qk_rope_head_dim + self.kv_lora_rank = config.kv_lora_rank + self.v_head_dim = config.v_head_dim + self.qk_nope_head_dim = config.qk_nope_head_dim + self.q_head_dim = config.qk_nope_head_dim + config.qk_rope_head_dim + + self.is_causal = True + + if self.q_lora_rank is None: + self.q_proj = nn.Linear( + self.hidden_size, self.num_heads * self.q_head_dim, bias=False + ) + else: + self.q_a_proj = nn.Linear( + self.hidden_size, config.q_lora_rank, bias=config.attention_bias + ) + self.q_a_layernorm = DeepseekCoderRMSNorm(config.q_lora_rank) + self.q_b_proj = nn.Linear( + config.q_lora_rank, self.num_heads * self.q_head_dim, bias=False + ) + + self.kv_a_proj_with_mqa = nn.Linear( + self.hidden_size, + config.kv_lora_rank + config.qk_rope_head_dim, + bias=config.attention_bias, + ) + self.kv_a_layernorm = DeepseekCoderRMSNorm(config.kv_lora_rank) + self.kv_b_proj = nn.Linear( + config.kv_lora_rank, + self.num_heads + * (self.q_head_dim - self.qk_rope_head_dim + self.v_head_dim), + bias=False, + ) + + self.o_proj = nn.Linear( + self.num_heads * self.v_head_dim, + self.hidden_size, + bias=config.attention_bias, + ) + self._init_rope() + + self.softmax_scale = self.q_head_dim ** (-0.5) + if self.config.rope_scaling is not None: + mscale_all_dim = self.config.rope_scaling.get("mscale_all_dim", 0) + scaling_factor = self.config.rope_scaling["factor"] + if mscale_all_dim: + mscale = yarn_get_mscale(scaling_factor, mscale_all_dim) + self.softmax_scale = self.softmax_scale * mscale * mscale + + def _init_rope(self): + if self.config.rope_scaling is None: + self.rotary_emb = DeepseekCoderRotaryEmbedding( + self.qk_rope_head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, + ) + else: + scaling_type = self.config.rope_scaling["type"] + scaling_factor = self.config.rope_scaling["factor"] + if scaling_type == "linear": + self.rotary_emb = DeepseekCoderLinearScalingRotaryEmbedding( + self.qk_rope_head_dim, + max_position_embeddings=self.max_position_embeddings, + scaling_factor=scaling_factor, + base=self.rope_theta, + ) + elif scaling_type == "dynamic": + self.rotary_emb = 
DeepseekCoderDynamicNTKScalingRotaryEmbedding( + self.qk_rope_head_dim, + max_position_embeddings=self.max_position_embeddings, + scaling_factor=scaling_factor, + base=self.rope_theta, + ) + elif scaling_type == "yarn": + kwargs = { + key: self.config.rope_scaling[key] + for key in [ + "original_max_position_embeddings", + "beta_fast", + "beta_slow", + "mscale", + "mscale_all_dim", + ] + if key in self.config.rope_scaling + } + self.rotary_emb = DeepseekCoderYarnRotaryEmbedding( + self.qk_rope_head_dim, + max_position_embeddings=self.max_position_embeddings, + scaling_factor=scaling_factor, + base=self.rope_theta, + **kwargs, + ) + else: + raise ValueError(f"Unknown RoPE scaling type {scaling_type}") + + def _shape(self, tensor: mindspore.Tensor, seq_len: int, bsz: int): + return ops.transpose(tensor.view(bsz, seq_len, self.num_heads, self.v_head_dim), 1, 2) + + + def forward( + self, + hidden_states: mindspore.Tensor, + attention_mask: Optional[mindspore.Tensor] = None, + position_ids: Optional[mindspore.Tensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + **kwargs, + ) -> Tuple[mindspore.Tensor, Optional[mindspore.Tensor], Optional[Tuple[mindspore.Tensor]]]: + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" + ) + bsz, q_len, _ = hidden_states.shape + + if self.q_lora_rank is None: + q = self.q_proj(hidden_states) + else: + q = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states))) + q = ops.transpose(q.view(bsz, q_len, self.num_heads, self.q_head_dim), 1, 2) + q_nope, q_pe = ops.split( + q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1 + ) + + compressed_kv = self.kv_a_proj_with_mqa(hidden_states) + compressed_kv, k_pe = ops.split( + compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1 + ) + k_pe = ops.transpose(k_pe.view(bsz, q_len, 1, self.qk_rope_head_dim), 1, 2) + kv = ( + ops.transpose(self.kv_b_proj(self.kv_a_layernorm(compressed_kv)) + .view(bsz, q_len, self.num_heads, self.qk_nope_head_dim + self.v_head_dim), 1, 2) + ) + + k_nope, value_states = ops.split( + kv, [self.qk_nope_head_dim, self.v_head_dim], dim=-1 + ) + kv_seq_len = value_states.shape[-2] + if past_key_value is not None: + if self.layer_idx is None: + raise ValueError( + f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} " + "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " + "with a layer index." 
+ ) + kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + + q_pe, k_pe = apply_rotary_pos_emb(q_pe, k_pe, cos, sin, position_ids) + + # query_states = k_pe.new_empty(bsz, self.num_heads, q_len, self.q_head_dim) + query_states = ops.empty((bsz, self.num_heads, q_len, self.q_head_dim), dtype=k_pe.dtype) + query_states[:, :, :, : self.qk_nope_head_dim] = q_nope + query_states[:, :, :, self.qk_nope_head_dim :] = q_pe + + # key_states = k_pe.new_empty(bsz, self.num_heads, q_len, self.q_head_dim) + key_states = ops.empty((bsz, self.num_heads, q_len, self.q_head_dim), dtype=k_pe.dtype) + key_states[:, :, :, : self.qk_nope_head_dim] = k_nope + key_states[:, :, :, self.qk_nope_head_dim :] = k_pe + if past_key_value is not None: + cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models + key_states, value_states = past_key_value.update( + key_states, value_states, self.layer_idx, cache_kwargs + ) + + attn_weights = ( + ops.matmul(query_states, ops.transpose(key_states, 2, 3)) * self.softmax_scale + ) + + if attn_weights.shape != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" + f" {attn_weights.shape}" + ) + assert attention_mask is not None + if attention_mask is not None: + if attention_mask.shape != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.shape}" + ) + attn_weights = attn_weights + attention_mask + + # upcast attention to fp32 + attn_weights = nn.functional.softmax( + attn_weights, dim=-1, dtype=mindspore.float32 + ).to(query_states.dtype) + attn_weights = nn.functional.dropout( + attn_weights, p=self.attention_dropout, training=self.training + ) + attn_output = ops.matmul(attn_weights, value_states) + + if attn_output.shape != (bsz, self.num_heads, q_len, self.v_head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.v_head_dim)}, but is" + f" {attn_output.shape}" + ) + + attn_output = ops.transpose(attn_output, 1, 2) + + attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.v_head_dim) + + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +ATTENTION_CLASSES = { + "eager": DeepseekCoderAttention, +} + + +class DeepseekCoderDecoderLayer(nn.Module): + def __init__(self, config: DeepseekCoderConfig, layer_idx: int): + super().__init__() + self.hidden_size = config.hidden_size + + self.self_attn = ATTENTION_CLASSES[config._attn_implementation]( + config=config, layer_idx=layer_idx + ) + + self.mlp = ( + DeepseekCoderMoE(config) + if ( + config.n_routed_experts is not None + and layer_idx >= config.first_k_dense_replace + and layer_idx % config.moe_layer_freq == 0 + ) + else DeepseekCoderMLP(config) + ) + self.input_layernorm = DeepseekCoderRMSNorm( + config.hidden_size, eps=config.rms_norm_eps + ) + self.post_attention_layernorm = DeepseekCoderRMSNorm( + config.hidden_size, eps=config.rms_norm_eps + ) + + def forward( + self, + hidden_states: mindspore.Tensor, + attention_mask: Optional[mindspore.Tensor] = None, + position_ids: Optional[mindspore.Tensor] = None, + past_key_value: Optional[Tuple[mindspore.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + **kwargs, + ) -> Tuple[ + mindspore.Tensor, 
Optional[Tuple[mindspore.Tensor, mindspore.Tensor]] + ]: + """ + Args: + hidden_states (`mindspore.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`mindspore.Tensor`, *optional*): + attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, + query_sequence_length, key_sequence_length)` if default attention is used. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + past_key_value (`Tuple(mindspore.Tensor)`, *optional*): cached past key and value projection states + """ + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" + ) + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + **kwargs, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +DeepseekCoder_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`DeepseekCoderConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
+""" + +class DeepseekCoderPreTrainedModel(PreTrainedModel): + config_class = DeepseekCoderConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["DeepseekCoderDecoderLayer"] + _skip_keys_device_placement = "past_key_values" + _supports_flash_attn_2 = True + _supports_cache_class = True + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + nn.init.normal_(module.weight.data, mean=0, std=std) + if module.bias is not None: + nn.init.zeros_(module.bias.data) + elif isinstance(module, nn.Embedding): + nn.init.normal_(module.weight.data, mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx] = 0 + + +DeepseekCoder_INPUTS_DOCSTRING = r""" + Args: + input_ids (`mindspore.Tensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`Cache` or `tuple(tuple(mindspore.Tensor))`, *optional*): + Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` + returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. + + Two formats are allowed: + - a [`~cache_utils.Cache`] instance; + - Tuple of `tuple(mindspore.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy + cache format. + + The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the + legacy cache format will be returned. + + If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't + have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` + of shape `(batch_size, sequence_length)`. 
+ inputs_embeds (`mindspore.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +class DeepseekCoderModel(DeepseekCoderPreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`DeepseekCoderDecoderLayer`] + + Args: + config: DeepseekCoderConfig + """ + + def __init__(self, config: DeepseekCoderConfig): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding( + config.vocab_size, config.hidden_size, self.padding_idx + ) + self.layers = nn.ModuleList( + [ + DeepseekCoderDecoderLayer(config, layer_idx) + for layer_idx in range(config.num_hidden_layers) + ] + ) + self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" + self.norm = DeepseekCoderRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + + def forward( + self, + input_ids: mindspore.Tensor = None, + attention_mask: Optional[mindspore.Tensor] = None, + position_ids: Optional[mindspore.Tensor] = None, + past_key_values: Optional[List[mindspore.Tensor]] = None, + inputs_embeds: Optional[mindspore.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = ( + output_attentions + if output_attentions is not None + else self.config.output_attentions + ) + output_hidden_states = ( + output_hidden_states + if output_hidden_states is not None + else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError( + "You cannot specify both input_ids and inputs_embeds at the same time" + ) + elif input_ids is not None: + batch_size, seq_length = input_ids.shape[:2] + elif inputs_embeds is not None: + batch_size, seq_length = inputs_embeds.shape[:2] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning( + "`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`transformers." + ) + use_cache = False + + past_key_values_length = 0 + if use_cache: + use_legacy_cache = not isinstance(past_key_values, Cache) + if use_legacy_cache: + past_key_values = DynamicCache.from_legacy_cache(past_key_values) + past_key_values_length = past_key_values.get_usable_length(seq_length) + + if position_ids is None: + position_ids = ops.arange( + past_key_values_length, + seq_length + past_key_values_length, + dtype=mindspore.int64, + ) + position_ids = position_ids.unsqueeze(0) + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + if self._use_flash_attention_2: + # 2d mask is passed through the layers + attention_mask = ( + attention_mask + if (attention_mask is not None and 0 in attention_mask) + else None + ) + else: + # 4d mask is passed through the layers + attention_mask = _prepare_4d_causal_attention_mask( + attention_mask, + (batch_size, seq_length), + inputs_embeds, + past_key_values_length, + ) + + # embed positions + hidden_states = inputs_embeds + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = None + + for decoder_layer in self.layers: + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + attention_mask, + position_ids, + past_key_values, + output_attentions, + use_cache, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_values, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache = layer_outputs[2 if output_attentions else 1] + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = None + if use_cache: + next_cache = ( + next_decoder_cache.to_legacy_cache() + if use_legacy_cache + else next_decoder_cache + ) + if not return_dict: + return tuple( + v + for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] + if v is not None + ) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + +class DeepseekCoderForCausalLM(DeepseekCoderPreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.model = DeepseekCoderModel(config) + self.vocab_size = config.vocab_size + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + return self.model + + + def forward( + self, + input_ids: mindspore.Tensor = None, + attention_mask: Optional[mindspore.Tensor] = None, + position_ids: Optional[mindspore.Tensor] = None, + 
past_key_values: Optional[List[mindspore.Tensor]] = None, + inputs_embeds: Optional[mindspore.Tensor] = None, + labels: Optional[mindspore.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, transformers., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, transformers., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from mindnlp.transformers import AutoTokenizer, DeepseekCoderForCausalLM + + >>> model = DeepseekCoderForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) + >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) + + >>> prompt = "Hey, are you conscious? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." + ```""" + output_attentions = ( + output_attentions + if output_attentions is not None + else self.config.output_attentions + ) + output_hidden_states = ( + output_hidden_states + if output_hidden_states is not None + else self.config.output_hidden_states + ) + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + logits = self.lm_head(hidden_states) + logits = logits.float() + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :] + shift_labels = labels[..., 1:] + # Flatten the tokens + loss_fct = nn.CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, + input_ids, + past_key_values=None, + attention_mask=None, + inputs_embeds=None, + **kwargs, + ): + if past_key_values is not None: + if isinstance(past_key_values, Cache): + cache_length = past_key_values.get_seq_length() + past_length = past_key_values.seen_tokens + max_cache_length = past_key_values.get_max_length() + else: + cache_length = past_length = past_key_values[0][0].shape[2] + max_cache_length = None + + # Keep only the unprocessed tokens: + # 1 - If the length of the attention_mask 
exceeds the length of input_ids, then we are in a setting where + # some of the inputs are exclusivelly passed as part of the cache (e.g. when passing input_embeds as + # input) + if ( + attention_mask is not None + and attention_mask.shape[1] > input_ids.shape[1] + ): + input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :] + # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard + # input_ids based on the past_length. + elif past_length < input_ids.shape[1]: + input_ids = input_ids[:, past_length:] + # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens. + + # If we are about to go beyond the maximum cache length, we need to crop the input attention mask. + if ( + max_cache_length is not None + and attention_mask is not None + and cache_length + input_ids.shape[1] > max_cache_length + ): + attention_mask = attention_mask[:, -max_cache_length:] + + position_ids = kwargs.get("position_ids", None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.to(mindspore.int32).cumsum(-1) - 1 + # position_ids.masked_fill_(attention_mask == 0, 1) + position_ids = ops.masked_fill(position_ids, attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "position_ids": position_ids, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "attention_mask": attention_mask, + } + ) + return model_inputs + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple( + past_state.index_select(0, beam_idx) + for past_state in layer_past + ), + ) + return reordered_past + + +class DeepseekCoderForSequenceClassification(DeepseekCoderPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = DeepseekCoderModel(config) + self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + + def forward( + self, + input_ids: mindspore.Tensor = None, + attention_mask: Optional[mindspore.Tensor] = None, + position_ids: Optional[mindspore.Tensor] = None, + past_key_values: Optional[List[mindspore.Tensor]] = None, + inputs_embeds: Optional[mindspore.Tensor] = None, + labels: Optional[mindspore.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`mindspore.Tensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, transformers., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
+ """ + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + transformer_outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError( + "Cannot handle batch sizes > 1 if no padding token is defined." + ) + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + sequence_lengths = ( + ops.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 + ) + else: + sequence_lengths = -1 + + pooled_logits = logits[ + ops.arange(batch_size), sequence_lengths + ] + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and ( + labels.dtype in (mindspore.int32, mindspore.int64) + ): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = nn.MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = nn.CrossEntropyLoss() + loss = loss_fct( + pooled_logits.view(-1, self.num_labels), labels.view(-1) + ) + elif self.config.problem_type == "multi_label_classification": + loss_fct = nn.BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels) + if not return_dict: + output = (pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + +__all__ = [ + "DeepseekCoderPreTrainedModel", + "DeepseekCoderModel", + "DeepseekCoderForCausalLM", + "DeepseekCoderForSequenceClassification" +] diff --git a/mindnlp/transformers/models/deepseek_coder/tokenization_deepseek_coder_fast.py b/mindnlp/transformers/models/deepseek_coder/tokenization_deepseek_coder_fast.py new file mode 100644 index 000000000..c7faaa5f5 --- /dev/null +++ b/mindnlp/transformers/models/deepseek_coder/tokenization_deepseek_coder_fast.py @@ -0,0 +1,89 @@ +# coding=utf-8 +# Copyright 2024 Huawei Technologies Co., Ltd +# Copyright 2023 DeepSeek-AI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Tokenization classes for DeepSeek Coder.""" + +from typing import List, Optional, Union + +from mindnlp.transformers.models.llama import LlamaTokenizerFast + + +class DeepseekCoderTokenizerFast(LlamaTokenizerFast): + """ + Construct a "fast" DeepSeek Coder tokenizer (backed by HuggingFace's *tokenizers* library). + + This tokenizer inherits from [`LlamaTokenizerFast`]. + + For more details, check the doc on HuggingFace's website. + """ + + vocab_files_names = LlamaTokenizerFast.vocab_files_names + pretrained_vocab_files_map = {} + max_model_input_sizes = {} + model_input_names = ["input_ids", "attention_mask"] + slow_tokenizer_class = None + padding_side = "left" + + def __init__(self, *args, **kwargs): + kwargs.pop("legacy", None) + super().__init__(*args, legacy=False, **kwargs) + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> List[str]: + return super().save_vocabulary(save_directory, filename_prefix=filename_prefix) + + +class DeepseekCoderTokenizer(DeepseekCoderTokenizerFast): + """ + Construct a DeepSeek Coder tokenizer. Based on byte-level Byte-Pair-Encoding. + + This tokenizer inherits from [`DeepseekCoderTokenizerFast`]. + + For more details, check the doc on HuggingFace's website. + """ + + def convert_ids_to_tokens( + self, ids: Union[int, List[int]], skip_special_tokens: bool = False + ) -> Union[str, List[str]]: + """ + Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and + added tokens. + + Args: + ids (`int` or `List[int]`): + The token id (or token ids) to convert to tokens. + skip_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not to remove special tokens in the decoding. + + Returns: + `str` or `List[str]`: The decoded token(s). + """ + if isinstance(ids, int): + return self._convert_id_to_token(ids) + tokens = [] + for index in ids: + index = int(index) + if skip_special_tokens and index in self.all_special_ids: + continue + token = self._tokenizer.id_to_token(index) + tokens.append(token if token is not None else "") + return tokens + + def _convert_id_to_token(self, index: int) -> Optional[str]: + token = self._tokenizer.id_to_token(int(index)) + return token if token is not None else "" + + +__all__ = ["DeepseekCoderTokenizer", + "DeepseekCoderTokenizerFast"] \ No newline at end of file