1 System Environment
Hardware environment (Ascend/GPU/CPU): Ascend 910
MindSpore version: mindspore=2.3.0, mindformers=r1.2.0
Execution mode (PyNative/Graph): either
Python version: Python 3.8
Operating system: Linux
2 Error Information
2.1 Problem Description
Fine-tuning qwen1_5_7b with MindSpore 2.3.0 and MindFormers r1.2.0; the configuration file used is shown below.
2.2 Configuration
seed: 42
output_dir: './output'
load_checkpoint: ''
auto_trans_ckpt: False  # If true, auto transform load_checkpoint to load in distributed model
only_save_strategy: False
resume_training: False
run_mode: 'finetune'

# trainer config
trainer:
  type: CausalLanguageModelingTrainer
  model_name: 'qwen2_7b'

# if True, do evaluate during the training process. if false, do nothing.
# note that the task trainer should support _evaluate_in_training function.
do_eval: False
eval_step_interval: -1    # num of step intervals between each eval, -1 means no step end eval.
eval_epoch_interval: 50   # num of epoch intervals between each eval, 1 means eval on every epoch end.

# runner config
runner_config:
  epochs: 5
  batch_size: 1
  sink_mode: True
  sink_size: 1

# wrapper cell config
runner_wrapper:
  type: MFTrainOneStepCell
  scale_sense:
    type: DynamicLossScaleUpdateCell
    loss_scale_value: 4096
    scale_factor: 2
    scale_window: 1000
  use_clip_grad: True

# optimizer
optimizer:
  type: FP32StateAdamWeightDecay
  beta1: 0.9
  beta2: 0.95
  eps: 1.e-8
  learning_rate: 1.e-6
  weight_decay: 0.01

# lr schedule
lr_schedule:
  type: CosineWithWarmUpLR
  learning_rate: 1.e-6
  warmup_ratio: 0.01
  total_steps: -1  # -1 means it will load the total steps of the dataset

# dataset
train_dataset: &train_dataset
  data_loader:
    type: MindDataset
    dataset_dir: ""
    shuffle: True
  input_columns: ["input_ids", "target_ids", "attention_mask"]
  num_parallel_workers: 8
  python_multiprocessing: False
  drop_remainder: True
  batch_size: 1
  repeat: 1
  numa_enable: False
  prefetch_size: 1
train_dataset_task:
  type: CausalLanguageModelDataset
  dataset_config: *train_dataset

# eval dataset
eval_dataset: &eval_dataset
  data_loader:
    type: MindDataset
    dataset_dir: ""
    shuffle: False
  input_columns: ["input_ids", "target_ids", "attention_mask"]
  num_parallel_workers: 8
  python_multiprocessing: False
  drop_remainder: False
  repeat: 1
  numa_enable: False
  prefetch_size: 1
eval_dataset_task:
  type: CausalLanguageModelDataset
  dataset_config: *eval_dataset

use_parallel: True
# parallel context config
parallel:
  parallel_mode: 1  # 0-data parallel, 1-semi-auto parallel, 2-auto parallel, 3-hybrid parallel
  gradients_mean: False
  enable_alltoall: False
  full_batch: True
  search_mode: "sharding_propagation"
  enable_parallel_optimizer: True
  strategy_ckpt_save_file: "./ckpt_strategy.ckpt"
  parallel_optimizer_config:
    gradient_accumulation_shard: False
    parallel_optimizer_threshold: 64
# default parallel config for device num = 8 (Ascend 910)
parallel_config:
  data_parallel: 1
  model_parallel: 8
  pipeline_stage: 1
  use_seq_parallel: True
  micro_batch_num: 1
  vocab_emb_dp: True
  gradient_aggregation_group: 8
# when model parallel is greater than 1, we can set micro_batch_interleave_num=2, that may accelerate the train process.
micro_batch_interleave_num: 1

# recompute config
recompute_config:
  recompute: True
  select_recompute: False
  parallel_optimizer_comm_recompute: False
  mp_comm_recompute: False
  recompute_slice_activation: False

# callbacks
callbacks:
  - type: MFLossMonitor
  - type: CheckpointMonitor
    prefix: "qwen2"
    save_checkpoint_steps: 5000
    keep_checkpoint_max: 1
    integrated_save: False
    async_save: False
  - type: ObsMonitor

# mindspore context init config
context:
  mode: 0  # 0--Graph Mode; 1--Pynative Mode
  device_target: "Ascend"
  enable_graph_kernel: False
  max_call_depth: 10000
  max_device_memory: "55GB"
  save_graphs: False
  save_graphs_path: "./graph"
  device_id: 0
  jit_config:
    jit_level: "O0"
  ascend_config:
    precision_mode: "must_keep_origin_dtype"

# model config
model:
  model_config:
    type: LlamaConfig
    batch_size: 1  # add for increase predict
    seq_length: 256
    hidden_size: 4096
    num_layers: 32
    num_heads: 32
    vocab_size: 151936
    intermediate_size: 11008
    qkv_has_bias: True
    rms_norm_eps: 1.0e-6
    theta: 1000000.0
    max_position_embedding: 32768
    emb_dropout_prob: 0.0
    eos_token_id: 151643
    pad_token_id: 151643
    compute_dtype: "bfloat16"
    layernorm_compute_type: "float32"
    softmax_compute_type: "float16"
    rotary_dtype: "float16"
    param_init_type: "float32"
    use_past: False
    extend_method: "None"  # support "None", "PI", "NTK"
    use_flash_attention: True
    fine_grain_interleave: 1
    qkv_concat: False
    block_size: 32
    num_blocks: 128
    offset: 0
    checkpoint_name_or_path: ""
    repetition_penalty: 1
    max_decode_length: 512
    top_k: 0
    top_p: 0.8
    do_sample: False
    compute_in_2d: True
    # configuration items copied from Qwen
    pet_config:
      pet_type: lora
      # configuration of lora
      lora_rank: 64
      lora_alpha: 16
      lora_dropout: 0.05
      target_modules: '.*wq|.*wk|.*wv|.*wo|.*w1|.*w2|.*w3'
      freeze_exclude: ["*wte*", "*lm_head*"]
    rotary_pct: 1.0
    rotary_emb_base: 1000000
    kv_channels: 128
  arch:
    type: LlamaForCausalLM

processor:
  return_tensors: ms
  tokenizer:
    model_max_length: 4096
    vocab_file: "/path/vocab.json"
    merges_file: "/path/merges.txt"
    unk_token: "<|endoftext|>"
    eos_token: "<|endoftext|>"
    pad_token: "<|endoftext|>"
    type: Qwen2Tokenizer
  type: Qwen2Processor
# metric
metric:
  type: PerplexityMetric

eval_callbacks:
  - type: ObsMonitor

auto_tune: False
filepath_prefix: './autotune'
autotune_per_step: 10

profile: False
profile_start_step: 1
profile_stop_step: 10
init_start_profile: False
profile_communication: False
profile_memory: True
layer_scale: False
layer_decay: 0.65
lr_scale_factor: 256

# aicc
remote_save_url: "Please input obs url on AICC platform."
2.3 Error Message
"trainer': {'model_name' :'qwen2_7b','type': 'CausalLanguageModelingTrainer'},
"use parallel': False}
2024-10-09 14:09:44,272 - mindfo mers[mindfommers/trainer/base trainer.py:787] - INFO - .Model Compiling, Please Wait a Moment
[WARNING] ME(1739195:281472916631776, MainProcess):2024-10-09- 14:09:44.273.000 [mindspo re/train/model.py:1328] For MFLossMonitor callback, f'step_begin', 'step.end', 'epoch_end', 'epoch_begin'} methods may not be supported in later version, Use methods prefixed with 'on_train' or 'on_eval' instead when using customized callbacks-
[WARNING] ME(1739195:281472916631776,MainProcess) :2024-10-09-14:09:44.273.000 [mindspore/train/model .py:1328] For Local20bsMonitor callback, {'epoch_end', 'steend'} methods may not be supported in later version, Use methods prefixed with "on train' or 'on eval' instead when using customized callbacks.(ERRORJ DEVICE (1739195,ffff8535b0e0.python) :2024-10-09- 14:12:37.081.576 [mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_vmm_adapter.cc:206] AllocDeviceMem] Reserve memory address failed.
[ERROR] PRE_ACT (1739195, ffff8535b0e0,python) :2024-10-09-14: 12:37.081.722 [mindspore/ccs rc/backend/common/mem_reuse/mem_dynamic_allocator.cc:414] AddMemBlockAnd
MemBufByEagerFree] AllocDeviceMemByEagerFree failed, alloc size : 0. [ERRORI RUNTIME FRAMEWORK(1739195, ffff8535b0e0,python) :2024-10-09- 14:12:37.081.857 [mindspore/ccsrc/runtime/graph_scheduler/actor/data_prepare_actor.cc:129] SyncTensorData] #umsg#Memory not enough:#umsg#Device( id:0) memory isn't enough and alloc failed, kernel name: Default/data-895, alloc size: 2B.
IERRORI DEVICE(1739195, ffff8535b0e0.python) :2024-10-09-14:12:37.082 -022 [mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_vmm_adapter.cc:206] AllocDeviceMem] Reserve memory address failed.
TERROR] PRE ACT( 1739195, ffff8535b0e0, python) :2024-10-09-14:12:37.082.057 [mindspore/ccsrc/backend/common/mem_ reuse/mem dynamic_allocato r.cc:414] AddMemBlockAndMemBufByEagerFree] AllocDeviceMemByEagerFree failed, alloc size : 0.
(ERROR]'RUNTIME FRAMEWORK(1739195, ffff8535b0e0,python) :2024-10-09-14: 12:37.082.100 [mindspore/ccsrc/runtime/graph scheduler/actor/data prepare actor.cc:129] SyncTensorData] #umsg#Memory not enough:#umsg#Deviće(id:0) memory isn't enough and alloc failed, kernel name: Default/data-1360, alloc size: 16B.
(ERRORI DEVICE(1739195, ffff8535b0e0python) :2024-10-09-14:12:37.082.239 [mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_wmm_adapter .cc:206] AllocDeviceMem] Reserve memory address failed.
[ERROR] PRE_ ACT (1739195, ffff8535b0e0,python) :2024-10-09-14: 12:37.082.273 [mindspore/ccs rc/backend/common/mem_reuse/mem_dynamic_allocator.cc:414] AddMemBlockAndMemBufByEagerFree] AllocDeviceMemByEagerFree failed, alloc size : 0.
(ERROR] RUNT IME_FRAMEWORK(1739195, ffff8535b0e0,python) :2024-10-09-14: 12:37.082.313 [mindspore/ccsrc/runtime/graph_scheduler/actor/data_prepare_actor.cc:129] SyncTensorData] #umsg#Memory not enough:#umsg#Device(id:0) memory isn't enough and alloc failed, kernel name: Default/data-1658, alloc size: 2B.
(ERROR] DEVICE(1739195, ffff8535b0e0,python) :2024-10-09-14: 12:37.082.424 [mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_vmm_adapter.cc:206] AllocDeviceMem] Reserve memory address failed.
[ERROR] PRE_ACT(1739195, ffff8535b0e0,python) :2024-10-09-14: 12:37.082.457 [mindspore/ccsrc/backend/common/mem_reuse/mem_dynamic_allocator.cc:414] AddMemBlockAndMemBufByEagerFree] AllocDeviceMemByEagerFree failed, alloc size : 0.
[ERROR] RUNT IME_FRAMEWORK(1739195, ffff8535b0e0,python) :2024-10-09-14: 12:37.082.519 [mindspore/ccsrc/runtime/graph_scheduler/actor/data_prepare_actor.cc:129] SyncTensorData] #umsg#Memory not enough:#umsg#Device( id:0) memory isn't enough and alloc failed, kernel name: Default/data-862, alloc size: 2B.
[ERROR] DEVICE (1739195, ffff8535b0e0,python) :2024-10-09-14: 12:37.082.648 Imindspore/ccsrc/plugin/device/ascend/hal/device/ascend_vmm_adapter.cc:206] AllocDeviceMem] Reserve memory address failed.
[ERROR] PRE ACT(1739195, ffff8535b0e0, python): :2024-10-09-14:12:37.082.679 [mindspore/ccsrc/backend/common/mem_reuse/mem_dynamic_allocato r.cc:414] AddMemBlockAndMemBufByEagerFree] AllocDeviceMemByEagerFree failed, alloc size : 0.
(ERROR] RUNTIME FRAMEWORK(1739195, ffff8535b0e0,python) :2024-10-09-14: 12:37.082.719 [mindspore/ccsrc/runtime/graph scheduler/actor/data prepare actor.cc:129] SyncTensorData] #umsg#Memory not enough:#umsg#Device( id:0) memory isn't enough and alloc failed, kernel name: Default/data-1339,- alloc size: 16B.
[ERROR] DEVICE (1739195,ffff8535b0e0,python) :2024-10-09-14:12:37.082.832 [mindspo re/ccsrc/plugin/device/ascend/hal/device/ascend_vmm_adapter .cc:206] AllocDeviceMem] Reserve memory address failed.
[ERROR] PRE. ACT(1739195, ffff8535b0e0,python) :2024-10-09-14: 12:37.082.863 [mindspore/ccs rc/backend/common/mem_reuse/mem_dynamic_allocator.cc:414] AddMemBlockAndMemBufByEagerFree] AllocDeviceMemByEagerFree failed, alloc size
3 Root Cause Analysis
The repeated "Reserve memory address failed" and "Memory not enough ... Device(id:0) memory isn't enough and alloc failed" errors indicate that device memory (HBM) on the Ascend card is exhausted: even tiny allocations of 2 B or 16 B fail because the memory pool can no longer reserve address space. The configuration items that determine the per-card memory footprint are collected below.
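For reference, the relevant items, copied verbatim from the configuration in Section 2.2 (only the comments are added here), are:

context:
  max_device_memory: "55GB"   # upper bound MindSpore may reserve on each card
parallel_config:
  data_parallel: 1
  model_parallel: 8           # the 7B model is sharded across 8 cards
  pipeline_stage: 1
recompute_config:
  recompute: True             # full recompute is already enabled
model:
  model_config:
    seq_length: 256
    batch_size: 1

With model_parallel: 8 the weights, activations and optimizer state are split eight ways; without that split the load does not fit within the 55 GB-per-card budget.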
4 Solution
The configuration file is written for model-parallel training on eight 64 GB Ascend cards. If the job is launched with fewer cards, each card must hold a larger share of the model and device memory will inevitably run out. Launch the job with the full set of cards (or more); see the sketch below.
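A minimal sketch of what to check, assuming the usual MindFormers convention that data_parallel × model_parallel × pipeline_stage must equal the number of NPUs the job is launched with:

# Sketch only: keep the parallel layout consistent with the cards actually launched.
# data_parallel * model_parallel * pipeline_stage must equal the launched device num.
parallel_config:
  data_parallel: 1    # 1 * 8 * 1 = 8 -> launch the job on 8 Ascend 910 cards
  model_parallel: 8
  pipeline_stage: 1

If more than 8 cards are available, raise data_parallel rather than lowering model_parallel (for example data_parallel: 2 on 16 cards), so that each card's model shard stays the same size or shrinks.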