手写实现 LLM 结构工作量大,难以实时跟进最新模型的结构创新;
从零实现的 LLM 训练无法较好地实现多卡分布式训练,训练效率较低;
config.json 文件即是模型的配置信息,包括了模型的架构、隐藏层大小、模型层数等,如图6.4所示:
import os
# Point huggingface_hub at the HF mirror endpoint (useful where the main site is slow/blocked)
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
# Download the model weights/config with the huggingface CLI (resumable)
os.system('huggingface-cli download --resume-download Qwen/Qwen2.5-1.5B --local-dir your_local_dir')
# Load the model configuration -- Qwen-2.5-1.5B as the example --
# using the transformers AutoConfig class.
from transformers import AutoConfig

# Local path of the downloaded model parameters
model_path = "qwen-1.5b"
# BUG FIX: the original called AutoConfig.from_pretrained(model_name_or_path),
# but only `model_path` is defined in this snippet; use it consistently.
config = AutoConfig.from_pretrained(model_path)
# Option 1: instantiate a freshly initialized model from the configuration
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)

# Option 2: load an already pretrained model from disk/hub
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, trust_remote_code=True)

# Load a pretrained tokenizer matching the model
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
# Load the pretraining corpus (one JSON object per line)
from datasets import load_dataset
ds = load_dataset('json', data_files='/mobvoi_seq_monkey_general_open_corpus.jsonl')
加载后的数据集存储在 train 键对应的值中,可以通过以下代码查看第一条样本:
# Peek at the first sample of the train split
ds["train"][0]
# List the dataset features (column names)
column_names = list(ds["train"].features)  # column_names: ["text"]
# Tokenize the dataset
def tokenize_function(examples):
    """Tokenize a batch of samples with the pre-loaded global tokenizer.

    With `Dataset.map(batched=True)`, `examples["text"]` is already a
    list of strings, so it can be passed to the tokenizer directly --
    the original `[item for item in examples["text"]]` was a redundant
    element-by-element copy of the same list.
    """
    return tokenizer(examples["text"])
# Batch-tokenize the whole dataset in parallel worker processes
tokenized_datasets = ds.map(
tokenize_function,
batched=True,
num_proc=10,
remove_columns=column_names,
load_from_cache_file=True,
desc="Running tokenizer on dataset",
)
# For pretraining, texts are usually concatenated and split into
# fixed-length segments.
from itertools import chain

# Block (segment) length used when grouping tokenized text
block_size = 2048


def group_texts(examples):
    """Concatenate every tokenized column and slice it into
    block_size-long chunks.

    The tail shorter than ``block_size`` is dropped (standard HF
    practice); ``labels`` is a copy of ``input_ids`` because the
    causal-LM objective predicts the next token of the same sequence.
    """
    # Flatten each column's list-of-lists into one long sequence
    joined = {key: list(chain.from_iterable(examples[key])) for key in examples}
    # Overall length, measured on the first column
    first_key = next(iter(examples))
    total = len(joined[first_key])
    # Truncate to a whole number of blocks (only when at least one block fits)
    if total >= block_size:
        total = (total // block_size) * block_size
    # Slice every column into block_size chunks
    chunked = {
        key: [seq[start:start + block_size] for start in range(0, total, block_size)]
        for key, seq in joined.items()
    }
    # Causal LM: labels are identical to the inputs
    chunked["labels"] = chunked["input_ids"].copy()
    return chunked
# Batch-process: group tokenized texts into fixed-length blocks
lm_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=10,
load_from_cache_file=True,
desc=f"Grouping texts in chunks of {block_size}",
batch_size = 40000,
)
train_dataset = lm_datasets["train"]
from transformers import TrainingArguments
# Configure the training arguments
training_args = TrainingArguments(
output_dir="output",# output path for checkpoints and logs
per_device_train_batch_size=4,# per-device train batch size
gradient_accumulation_steps=4,# gradient accumulation; effective bs = bs * accumulation steps
logging_steps=10,# step interval for logging the loss
num_train_epochs=1,# number of training epochs
save_steps=100, # step interval for saving checkpoints
learning_rate=1e-4,# learning rate
gradient_checkpointing=True# enable gradient checkpointing to trade compute for memory
)
from transformers import Trainer, default_data_collator
from torchdata.datapipes.iter import IterableWrapper
# Build the trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset= IterableWrapper(train_dataset),
eval_dataset= None,
tokenizer=tokenizer,
# default_data_collator simply stacks the pre-built fields; this suits
# the CLM setup here because labels were already created in group_texts
data_collator=default_data_collator
)
trainer.train()
注:上述代码存放于./code/pretrain.ipynb文件中。
在实际训练中,一般会将上述代码封装为一个完整的训练脚本(./code/pretrain.py)来实现训练全流程。
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from torchdata.datapipes.iter import IterableWrapper
from itertools import chain
import deepspeed
from typing import Optional,List
import datasets
import pandas as pd
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
import datetime
from transformers.testing_utils import CaptureLogger
from transformers.trainer_utils import get_last_checkpoint
import swanlab
# 超参类
@dataclass
class ModelArguments:
    """Model-related hyper-parameters."""

    # Path of pretrained weights (used for post-training)
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "后训练使用,为预训练模型参数地址"
            )
        },
    )
    # Path of the config file (used when pretraining from scratch)
    config_name: Optional[str] = field(
        default=None, metadata={"help": "预训练使用,Config 文件地址"}
    )
    # Path of the pretrained tokenizer
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "预训练 Tokenizer 地址"}
    )
    # dtype used for training; bfloat16 is recommended
    torch_dtype: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "模型训练使用的数据类型,推荐 bfloat16"
            ),
            "choices": ["auto", "bfloat16", "float16", "float32"],
        },
    )
@dataclass
class DataTrainingArguments:
    """Data and preprocessing hyper-parameters."""

    # Paths of the training data files
    train_files: Optional[List[str]] = field(default=None, metadata={"help": "训练数据路径"})
    # Fixed text-block length used when grouping tokenized text
    block_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "设置的文本块长度"
            )
        },
    )
    # Number of worker processes used for preprocessing
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "预处理使用线程数."},
    )
# Parse command-line arguments into the three argument dataclasses
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Set up logging to stdout
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    handlers=[logging.StreamHandler(sys.stdout)],
)
# BUG FIX: `logger` was used below but never created anywhere in the
# visible script; create the module logger explicitly.
logger = logging.getLogger(__name__)
# Raise the transformers log level to INFO
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log the overall training setup (rank/device/precision)
logger.warning(
    f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
    + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Look for an existing checkpoint to resume from
last_checkpoint = None
if os.path.isdir(training_args.output_dir):
    # Use transformers' get_last_checkpoint helper for auto-detection
    last_checkpoint = get_last_checkpoint(training_args.output_dir)
    if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
        # Non-empty output dir without a checkpoint: refuse to clobber it
        raise ValueError(
            f"输出路径 ({training_args.output_dir}) 非空 "
        )
    elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
        logger.info(
            f"从 {last_checkpoint}恢复训练"
        )
# Initialize the model: either from scratch (config only) or from pretrained weights
if model_args.config_name is not None:
# From scratch: instantiate the architecture from a config file
config = AutoConfig.from_pretrained(model_args.config_name)
logger.warning("你正在从零初始化一个模型")
logger.info(f"模型参数配置地址:{model_args.config_name}")
logger.info(f"模型参数:{config}")
model = AutoModelForCausalLM.from_config(config,trust_remote_code=True)
# data_ptr-keyed dict de-duplicates tied/shared parameter tensors before counting
n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values())
logger.info(f"预训练一个新模型 - Total size={n_params/2**20:.2f}M params")
elif model_args.model_name_or_path is not None:
# Post-training: start from an existing pretrained checkpoint
logger.warning("你正在初始化一个预训练模型")
logger.info(f"模型参数地址:{model_args.model_name_or_path}")
model = AutoModelForCausalLM.from_pretrained(model_args.model_name_or_path,trust_remote_code=True)
n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values())
logger.info(f"继承一个预训练模型 - Total size={n_params/2**20:.2f}M params")
else:
# Neither a config nor a checkpoint was given -- cannot build a model
logger.error("config_name 和 model_name_or_path 不能均为空")
raise ValueError("config_name 和 model_name_or_path 不能均为空")
logger.info("初始化 Trainer")
# NOTE(review): train_dataset and tokenizer are built elsewhere in
# pretrain.py (not shown in this excerpt) -- confirm against the full script.
trainer = Trainer(
model=model,
args=training_args,
train_dataset= IterableWrapper(train_dataset),
tokenizer=tokenizer,
data_collator=default_data_collator
)
# Resume from a checkpoint when one is available (CLI flag takes precedence)
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
logger.info("开始训练")
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
# Initialize SwanLab experiment tracking
swanlab.init(project="pretrain", experiment_name="from_scrach")
随后通过 bash 脚本(./code/pretrain.sh)定义超参数的值,并通过 DeepSpeed 启动训练,从而实现高效的多卡分布式训练:
# Restrict the visible GPUs.
# BUG FIX: the original put CUDA_VISIBLE_DEVICES on its own line (a bare
# assignment does not affect the next command) and had no trailing
# backslashes, so every "--flag" line would have run as a separate shell
# command. Prefix the env var and join the invocation with continuations.
CUDA_VISIBLE_DEVICES=0,1 \
deepspeed pretrain.py \
    --config_name autodl-tmp/qwen-1.5b \
    --tokenizer_name autodl-tmp/qwen-1.5b \
    --train_files autodl-tmp/dataset/pretrain_data/mobvoi_seq_monkey_general_open_corpus_small.jsonl \
    --per_device_train_batch_size 16 \
    --gradient_accumulation_steps 4 \
    --do_train \
    --output_dir autodl-tmp/output/pretrain \
    --evaluation_strategy no \
    --learning_rate 1e-4 \
    --num_train_epochs 1 \
    --warmup_steps 200 \
    --logging_dir autodl-tmp/output/pretrain/logs \
    --logging_strategy steps \
    --logging_steps 5 \
    --save_strategy steps \
    --save_steps 100 \
    --preprocessing_num_workers 10 \
    --save_total_limit 1 \
    --seed 12 \
    --block_size 2048 \
    --bf16 \
    --gradient_checkpointing \
    --deepspeed ./ds_config_zero2.json \
    --report_to swanlab
    # --resume_from_checkpoint ${output_model}/checkpoint-20400
ds_config_zero2.json(与上述启动脚本中 --deepspeed 参数指定的文件一致)作为 DeepSpeed 的配置参数:
{
"fp16": {
"enabled": "auto",
"loss_scale": 0,
"loss_scale_window": 1000,
"initial_scale_power": 16,
"hysteresis": 2,
"min_loss_scale": 1
},
"bf16": {
"enabled": "auto"
},
"optimizer": {
"type": "AdamW",
"params": {
"lr": "auto",
"betas": "auto",
"eps": "auto",
"weight_decay": "auto"
}
},
"scheduler": {
"type": "WarmupLR",
"params": {
"warmup_min_lr": "auto",
"warmup_max_lr": "auto",
"warmup_num_steps": "auto"
}
},
"zero_optimization": {
"stage": 2,
"offload_optimizer": {
"device": "none",
"pin_memory": true
},
"allgather_partitions": true,
"allgather_bucket_size": 2e8,
"overlap_comm": true,
"reduce_scatter": true,
"reduce_bucket_size": 2e8,
"contiguous_gradients": true
},
"gradient_accumulation_steps": "auto",
"gradient_clipping": "auto",
"steps_per_print": 100,
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"wall_clock_breakdown": false
}
pretrain.sh 脚本即可开始训练。
完整的微调代码存放于 ./code/finetune.py。
# Tokenizer-specific special tokens must be defined by hand
# BOS marker ids
im_start = tokenizer("<|im_start|>").input_ids
# EOS marker ids
im_end = tokenizer("<|im_end|>").input_ids
# PAD id; label positions set to this id are excluded from the loss
IGNORE_TOKEN_ID = tokenizer.pad_token_id
# Newline token
# NOTE(review): tokenizer('n') tokenizes the literal letter "n"; this
# almost certainly lost a backslash during extraction and should be
# tokenizer('\n') -- confirm against the original finetune.py.
nl_tokens = tokenizer('n').input_ids
# Role marker ids (each followed by the newline token)
_system = tokenizer('system').input_ids + nl_tokens
_user = tokenizer('human').input_ids + nl_tokens
_assistant = tokenizer('assistant').input_ids + nl_tokens
# Accumulators for every sample's token ids and labels
input_ids, targets = [], []
# Iterate over all samples
for i in tqdm(range(len(sources))):
# `source` is one multi-turn dialogue sample
source = sources[i]
# Conversations must start from the user ("human") side
if source[0]["from"] != "human":
source = source[1:]
# Per-sample input ids and label ids
input_id, target = [], []
# system segment: 【BOS】system\nYou are a helpful assistant.【EOS】\n
system = im_start + _system + tokenizer(system_message).input_ids + im_end + nl_tokens
input_id += system
# The system prompt is masked out of the loss
target += im_start + [IGNORE_TOKEN_ID] * (len(system)-3) + im_end + nl_tokens
assert len(input_id) == len(target)
# Append each dialogue turn in order
for j, sentence in enumerate(source):
# `sentence` is one turn of dialogue
role = roles[sentence["from"]]
# user turn:      <|im_start|>human\ninstruction【EOS】\n
# assistant turn: <|im_start|>assistant\nresponse【EOS】\n
# NOTE(review): the next statement is split across lines without
# parentheses or a backslash in this extracted text; the original
# source presumably used a line continuation -- confirm.
_input_id = tokenizer(role).input_ids + nl_tokens +
tokenizer(sentence["value"]).input_ids + im_end + nl_tokens
input_id += _input_id
if role == '<|im_start|>human':
# User turns are masked out of the loss
_target = im_start + [IGNORE_TOKEN_ID] * (len(_input_id)-3) + im_end + nl_tokens
elif role == '<|im_start|>assistant':
# Assistant responses are the supervision targets; the role tokens
# themselves are masked (same line-continuation caveat as above)
_target = im_start + [IGNORE_TOKEN_ID] * len(tokenizer(role).input_ids) +
_input_id[len(tokenizer(role).input_ids)+1:-2] + im_end + nl_tokens
else:
print(role)
raise NotImplementedError
target += _target
assert len(input_id) == len(target)
# Finally pad both sequences to max_len, then truncate defensively
input_id += [tokenizer.pad_token_id] * (max_len - len(input_id))
target += [IGNORE_TOKEN_ID] * (max_len - len(target))
input_ids.append(input_id[:max_len])
targets.append(target[:max_len])
最后将 input_ids 和 targets 转换为 torch.Tensor,再拼接成 Dataset 所需的字典返回即可:
# Convert the accumulated lists to tensors and return the dict a Dataset expects
input_ids = torch.tensor(input_ids)
targets = torch.tensor(targets)
return dict(
input_ids=input_ids,
labels=targets,
# attention_mask is False at padding positions
attention_mask=input_ids.ne(tokenizer.pad_token_id),
)
class SupervisedDataset(Dataset):
    """Dataset for supervised fine-tuning.

    Tokenizes and masks the raw multi-turn conversations once, up
    front, in ``__init__`` via the ``preprocess`` routine defined above.
    """

    def __init__(self, raw_data, tokenizer, max_len: int):
        super(SupervisedDataset, self).__init__()
        # Pull out each sample's conversation list and preprocess it
        conversations = [sample["conversations"] for sample in raw_data]
        processed = preprocess(conversations, tokenizer, max_len)
        self.input_ids = processed["input_ids"]
        self.labels = processed["labels"]
        self.attention_mask = processed["attention_mask"]

    def __len__(self):
        return len(self.input_ids)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        return dict(
            input_ids=self.input_ids[i],
            labels=self.labels[i],
            attention_mask=self.attention_mask[i],
        )
# Parse command-line arguments into the argument dataclasses
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Initialize SwanLab experiment tracking
swanlab.init(project="sft", experiment_name="qwen-1.5b")
# Set up logging to stdout
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
# Raise the log level to INFO
# NOTE(review): `logger` is used below but not created in this excerpt;
# it presumably comes from logging.getLogger(__name__) earlier in
# finetune.py -- confirm against the full script.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log the overall training setup (rank/device/precision)
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Look for an existing checkpoint to resume from
last_checkpoint = None
if os.path.isdir(training_args.output_dir):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"输出路径 ({training_args.output_dir}) 非空 "
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"从 {last_checkpoint}恢复训练"
)
# Set the random seed for reproducibility
set_seed(training_args.seed)
# Initialize the model from the pretrained checkpoint
logger.warning("加载预训练模型")
logger.info(f"模型参数地址:{model_args.model_name_or_path}")
model = AutoModelForCausalLM.from_pretrained(model_args.model_name_or_path,trust_remote_code=True)
# data_ptr-keyed dict de-duplicates tied/shared parameter tensors before counting
n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values())
logger.info(f"继承一个预训练模型 - Total size={n_params/2**20:.2f}M params")
# Initialize the tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path)
logger.info("完成 tokenizer 加载")
# Load the fine-tuning data (only the first 10000 lines are read here)
with open(data_args.train_files) as f:
lst = [json.loads(line) for line in f.readlines()[:10000]]
logger.info("完成训练集加载")
logger.info(f"训练集地址:{data_args.train_files}")
logger.info(f'训练样本总数:{len(lst)}')
# logger.info(f"训练集采样:{ds["train"][0]}")
train_dataset = SupervisedDataset(lst, tokenizer=tokenizer, max_len=2048)
logger.info("初始化 Trainer")
trainer = Trainer(
model=model,
args=training_args,
train_dataset= IterableWrapper(train_dataset),
tokenizer=tokenizer
)
# Resume from a checkpoint when one is available (CLI flag takes precedence)
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
logger.info("开始训练")
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
可以针对不同的下游任务构建小型 LoRA 模块,从而在共享预训练模型参数基础上有效地切换下游任务。
使用 LoRA 训练时,无需为绝大多数(被冻结的)参数计算梯度或维护优化器状态(在使用 Adam 等自适应优化器时尤其显著),因此训练更高效、硬件门槛更低。
LoRA 使用简单的线性设计,在部署时将可训练矩阵与冻结权重合并,不存在推理延迟。
# Apply LoRA to the attention query and value projection matrices
target_modules = ["q_proj","v_proj"]
# 找到模型的各个组件中,名字里带"q_proj","v_proj"的 target_module_found = re.fullmatch(self.peft_config.target_modules, key) # 这里的 key,是模型的组件名
class LoraLayer:
    """Bookkeeping mixin shared by LoRA-adapted layers.

    Stores the rank, the scaling hyper-parameter, the dropout applied to
    the LoRA branch, and the merge-state flags.
    """

    def __init__(
        self,
        r: int,               # LoRA rank
        lora_alpha: int,      # scaling numerator (scale = alpha / r)
        lora_dropout: float,  # dropout probability on the LoRA branch
        merge_weights: bool,  # whether eval mode folds the LoRA delta into the base weight
    ):
        self.r = r
        self.lora_alpha = lora_alpha
        # Dropout is optional: fall back to the identity when the rate is zero
        if lora_dropout > 0.0:
            self.lora_dropout = nn.Dropout(p=lora_dropout)
        else:
            self.lora_dropout = lambda x: x
        # The LoRA delta starts out un-merged from the base weight
        self.merged = False
        self.merge_weights = merge_weights
        self.disable_adapters = False
class Linear(nn.Linear, LoraLayer):
    """nn.Linear augmented with a low-rank LoRA branch.

    Forward computes ``W x + (alpha / r) * B(A(dropout(x)))`` while the
    delta is un-merged; merge state is tracked by the LoraLayer mixin.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        r: int = 0,
        lora_alpha: int = 1,
        lora_dropout: float = 0.0,
        fan_in_fan_out: bool = False,
        merge_weights: bool = True,
        **kwargs,
    ):
        # Initialize both base classes
        nn.Linear.__init__(self, in_features, out_features, **kwargs)
        LoraLayer.__init__(self, r=r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, merge_weights=merge_weights)
        self.fan_in_fan_out = fan_in_fan_out
        # Actual trainable parameters
        if r > 0:
            # Down-projection A: in_features -> r
            self.lora_A = nn.Linear(in_features, r, bias=False)
            # Up-projection B: r -> out_features
            self.lora_B = nn.Linear(r, out_features, bias=False)
            # Normalization (scaling) coefficient
            self.scaling = self.lora_alpha / self.r
            # Freeze the base weight; only A and B are updated
            self.weight.requires_grad = False
        # (Re)initialize parameters, including A and B when present.
        # NOTE(review): placed unconditionally, matching the upstream
        # loralib/peft implementation; the extracted text's indentation
        # was ambiguous here.
        self.reset_parameters()
        if fan_in_fan_out:
            self.weight.data = self.weight.data.T

    def forward(self, x: torch.Tensor):
        if self.disable_adapters:
            # Adapters disabled: if the delta was merged, subtract it back out
            if self.r > 0 and self.merged:
                self.weight.data -= (
                    transpose(self.lora_B.weight @ self.lora_A.weight, self.fan_in_fan_out) * self.scaling
                )
                self.merged = False
            return F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
        # Main branch: base output plus the scaled LoRA delta.
        # BUG FIX: the original had a bare string literal ('''主要分支''')
        # between the if-block and this elif, which is a SyntaxError;
        # it is now an ordinary comment.
        elif self.r > 0 and not self.merged:
            result = F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
            if self.r > 0:
                result += self.lora_B(self.lora_A(self.lora_dropout(x))) * self.scaling
            return result
        else:
            # r == 0 or delta already merged: plain linear layer
            return F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
# Imports for LoRA fine-tuning with peft
import torch.nn as nn
from transformers import AutoTokenizer, AutoModel
from transformers import Trainer
from peft import get_peft_model, LoraConfig, TaskType, PeftModel
# Load the base model and its tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
model = AutoModel.from_pretrained(
MODEL_PATH, trust_remote_code=True
)
# LoRA configuration
# NOTE(review): task_type is CAUSAL_LM but the base model is loaded with
# AutoModel (no LM head); AutoModelForCausalLM is presumably intended -- confirm.
peft_config = LoraConfig(
task_type=TaskType.CAUSAL_LM,
inference_mode=False,
r=8,
lora_alpha=32,
lora_dropout=0.1,
)
# Wrap the base model with LoRA adapters; only they are trainable
model = get_peft_model(model, peft_config)
trainer = Trainer(
model=model,
args=training_args,
train_dataset= IterableWrapper(train_dataset),
tokenizer=tokenizer
)
trainer.train()
get_peft_model 获取一个 LoRA 模型即可,其他的不需要进行任何修改。但要注意的是,LoRA 微调能够大幅度降低显卡占用,且在下游任务适配上能够取得较好的效果,但如果是需要学习对应知识的任务,LoRA 由于只调整低秩矩阵,难以实现知识的注入,一般效果不佳,因此不推荐使用 LoRA 进行模型预训练或后训练。
transformers
deepspeed
peft
wandb
tokenizers
分词器训练
数据集构建
模型搭建/继承预训练模型
构造 Trainer 进行训练
分词器训练
数据集构建
LoRA 配置
继承预训练模型
构造 Trainer 进行训练
状态(State) :这是一个系统在某一时刻的具体状况。比如在一个棋盘游戏中,状态可以表示棋盘上所有棋子的当前排列情况。对于一个自动驾驶汽车来说,状态可能包括汽车的速度、位置,以及周围障碍物的位置等。
动作(Action) :动作是智能体在给定状态下可执行的操作。以自行车为例,动作可能包括前进、停止、转弯等。在一个复杂的系统中,动作集可以非常庞大。
奖励(Reward) :这是智能体在执行某个动作后获得的反馈,通常是一个数值。奖励可以是立即的,也可以是延后的。一个好的动作可能会得到正奖励,而不好的动作可能会得到负奖励。
策略(Policy) :策略是一套指导智能体如何选择动作的规则。简单来说,策略就是告诉智能体在每个状态下应该做什么。
价值函数(Value Function) :这是一种对策略的评估工具,旨在预测从当前状态出发,长期来看能够获得的总奖励。值函数帮助智能体不仅考虑当前步骤的奖励,而且能更好地权衡短期和长期的收益。
观察状态 :智能体首先观察当前的状态(State)。
选择动作 :根据观察到的状态和预先确定的策略,智能体选择一个动作(Action)。
执行动作 :智能体执行所选的动作。
接收奖励和新状态 :执行动作后,智能体从环境中接收到相应的奖励(Reward)和更新后的新状态(State)。
$$E_{\tau \sim P_{\theta}(\tau)}[R(\tau)]$$:表示在策略 $$P_{\theta}$$ 下轨迹 $$\tau$$ 的回报 $$R(\tau)$$ 的期望值。
$$R(\tau)$$:轨迹 $$\tau$$ 的回报,即从起始状态到终止状态获得的所有奖励的总和。
$$\tau$$:表示一条轨迹,即智能体在环境中的状态和动作序列。
$$P_{\theta}(\tau)$$:在参数 $$\theta$$ 下生成轨迹 $$\tau$$ 的概率,通常由策略或策略网络确定。
[
{
"question": "Python中的列表是什么?",
"chosen": "Python中的列表是一种有序的可变容器,允许存储多个元素,并且可以通过索引访问。",
"rejected": "Python中的列表用于存储数据。"
},
{
"question": "Python中的元组是什么?",
"chosen": "Python中的元组是一种有序的不可变容器,允许存储多个元素,并且一旦创建就不能修改。",
"rejected": "Python中的元组用于存储数据。"
}
]
Material BoF 花瓣 Knstrct 站酷 数英 知乎 优设网 人人产品经理 CoroFlot UsePanda 智能UI设计 Dolody KimiChat ReadPaper 微软研究院 智谱AI AMiner AI知识社区 AdobeColor 象寄 Gitee O2 Lab Designup 爱范 良仓 DesignShard PoxelHours 博思 极客公园 36氪 Midjourney 无界AI 小红书 Houzz ArchDaily 蝉妈妈 Cocos引擎 DesignerNews 腾讯一站式设计 Oschina FAW RafalTomal UI中国 Unsplash 墨刀 AntDesign 字由 Figma Bilibili
电 话: 010-62128818
Email: deepelement.ai@outlook.com
注册会员开通