
Tuesday, December 30, 2025

Using TRT-LLM on DGX Spark

References: the "TRT LLM for Inference" and "NVFP4 Quantization" playbooks under https://build.nvidia.com/spark,
and the trtllm-serve documentation at https://nvidia.github.io/TensorRT-LLM/1.0.0rc2/commands/trtllm-serve.html

# Configure Docker permissions
$ sudo usermod -aG docker $USER
$ newgrp docker
$ id
uid=1000(spark) gid=988(docker) groups=988(docker),4(adm),27(sudo),29(audio),30(dip),46(plugdev),100(users),122(lpadmin),1000(spark)
$ ps
    PID TTY          TIME CMD
   6123 pts/1    00:00:00 bash
  24590 pts/1    00:00:00 bash
  24597 pts/1    00:00:00 ps
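
# (Optional) newgrp only applies the docker group to the current shell. A quick sanity
# check (not part of the original playbook) is to confirm Docker runs without sudo:
$ docker run --rm hello-world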

# Verify environment prerequisites
$ nvidia-smi
$ docker run --rm --gpus all nvcr.io/nvidia/tensorrt-llm/release:spark-single-gpu-dev nvidia-smi
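
# The first docker run pulls the image automatically if it is not cached locally;
# to pre-pull it explicitly (optional):
$ docker pull nvcr.io/nvidia/tensorrt-llm/release:spark-single-gpu-dev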

$ export HF_TOKEN=hf_xxxxxxxxxxxxxxxxxxxx    # use your own Hugging Face access token
$ docker run --rm -it --gpus all \
  nvcr.io/nvidia/tensorrt-llm/release:spark-single-gpu-dev \
  python -c "import tensorrt_llm; print(f'TensorRT-LLM version: {tensorrt_llm.__version__}')"
The last two lines of output:
[TensorRT-LLM] TensorRT-LLM version: 1.1.0rc3
TensorRT-LLM version: 1.1.0rc3

# Create Hugging Face cache directory
$ mkdir -p $HOME/.cache/huggingface/
## If you need to relocate the cache directory
$ export HF_HOME=/mnt/Data/huggingface
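# Note: the docker run commands below mount $HOME/.cache/huggingface into the container.
# If you relocate the cache via HF_HOME as above, create that directory and mount it instead
# (a sketch based on the export above):
$ mkdir -p "$HF_HOME"
# ...and in the docker run commands, replace the cache mount with:
#   -v "$HF_HOME":/root/.cache/huggingface/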

$ export MODEL_HANDLE="openai/gpt-oss-20b"
$ docker run \
  -e MODEL_HANDLE=$MODEL_HANDLE \
  -e HF_TOKEN=$HF_TOKEN \
  -v $HOME/.cache/huggingface/:/root/.cache/huggingface/ \
  --rm -it --ulimit memlock=-1 --ulimit stack=67108864 \
  --gpus=all --ipc=host --network host \
  nvcr.io/nvidia/tensorrt-llm/release:spark-single-gpu-dev \
  bash -c '
    export TIKTOKEN_ENCODINGS_BASE="/tmp/harmony-reqs" && \
    mkdir -p $TIKTOKEN_ENCODINGS_BASE && \
    wget -P $TIKTOKEN_ENCODINGS_BASE https://openaipublic.blob.core.windows.net/encodings/o200k_base.tiktoken && \
    wget -P $TIKTOKEN_ENCODINGS_BASE https://openaipublic.blob.core.windows.net/encodings/cl100k_base.tiktoken && \
    hf download $MODEL_HANDLE && \
    python examples/llm-api/quickstart_advanced.py \
      --model_dir $MODEL_HANDLE \
      --prompt "Paris is great because" \
      --max_tokens 64
    '

==================
# Serve LLM with OpenAI-compatible API
$ export MODEL_HANDLE="openai/gpt-oss-20b"
$ export MODEL_HANDLE="openai/gpt-oss-120b"
$ export MODEL_HANDLE="meta-llama/Llama-3.3-70B-Instruct"
$ export MODEL_HANDLE="Qwen/Qwen3-4B-Instruct-2507"
$ export MODEL_HANDLE="deepseek-ai/DeepSeek-R1-Distill-Llama-8B'"

$ docker run --name trtllm_llm_server --rm -it --gpus all --ipc host --network host \
  -e HF_TOKEN=$HF_TOKEN \
  -e MODEL_HANDLE="$MODEL_HANDLE" \
  -v $HOME/.cache/huggingface/:/root/.cache/huggingface/ \
  nvcr.io/nvidia/tensorrt-llm/release:spark-single-gpu-dev \
  bash -c '
    export TIKTOKEN_ENCODINGS_BASE="/tmp/harmony-reqs" && \
    mkdir -p $TIKTOKEN_ENCODINGS_BASE && \
    wget -P $TIKTOKEN_ENCODINGS_BASE https://openaipublic.blob.core.windows.net/encodings/o200k_base.tiktoken && \
    wget -P $TIKTOKEN_ENCODINGS_BASE https://openaipublic.blob.core.windows.net/encodings/cl100k_base.tiktoken && \
    hf download $MODEL_HANDLE && \
    cat > /tmp/extra-llm-api-config.yml <<EOF
print_iter_log: false
kv_cache_config:
  dtype: "auto"
  free_gpu_memory_fraction: 0.4
cuda_graph_config:
  enable_padding: true
disable_overlap_scheduler: true
EOF
    trtllm-serve "$MODEL_HANDLE" \
      --max_batch_size 8 \
      --trust_remote_code \
      --host 0.0.0.0 \
      --port 8000 \
      --extra_llm_api_options /tmp/extra-llm-api-config.yml
  '
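# The server needs some time to download and load the model. One way to wait for readiness
# (assuming the /health endpoint exposed by trtllm-serve; check the docs for your version)
# is to poll it from another terminal:
$ until curl -sf http://localhost:8000/health > /dev/null; do sleep 5; done; echo "server ready"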
$ curl -s http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "'"$MODEL_HANDLE"'",
    "messages": [{"role": "user", "content": "請你自我介紹"}],
    "max_tokens": 64
  }'
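
# Two optional checks (standard OpenAI-compatible endpoints, not in the original note):
# list the model name the server registered, and try a streaming request.
$ curl -s http://localhost:8000/v1/models
$ curl -N http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "'"$MODEL_HANDLE"'",
    "messages": [{"role": "user", "content": "Say hello in one sentence."}],
    "max_tokens": 64,
    "stream": true
  }'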

# Cleanup and rollback
$ sudo chown -R "$USER:$USER" "$HOME/.cache/huggingface"
$ rm -rf $HOME/.cache/huggingface/
$ docker image prune -f
$ docker rmi nvcr.io/nvidia/tensorrt-llm/release:spark-single-gpu-dev
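
# (Optional) check Docker disk usage after cleanup:
$ docker system df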


==================
# NVFP4 Quantization
$ mkdir -p ./output_models
$ chmod 755 ./output_models
# Convert a model from Hugging Face (requires MODEL_HANDLE exported as above)
$ docker run --rm -it --gpus all --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 \
  -v "./output_models:/workspace/output_models" \
  -v "$HOME/.cache/huggingface:/root/.cache/huggingface" \
  -e HF_TOKEN=$HF_TOKEN \
  nvcr.io/nvidia/tensorrt-llm/release:spark-single-gpu-dev \
  bash -c "
    git clone -b 0.35.0 --single-branch https://github.com/NVIDIA/Model-Optimizer.git /app/TensorRT-Model-Optimizer && \
    cd /app/TensorRT-Model-Optimizer && pip install -e '.[dev]' && \
    export ROOT_SAVE_PATH='/workspace/output_models' && \
    /app/TensorRT-Model-Optimizer/examples/llm_ptq/scripts/huggingface_example.sh \
    --model $MODEL_HANDLE \
    --quant nvfp4 \
    --tp 1 \
    --export_fmt hf
  "
# A pynvml.NVMLError_NotSupported: Not Supported error may appear; it is harmless and can be ignored
# Convert a local model
$ docker run --rm -it --gpus all --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 \
  -v "./output_models:/workspace/output_models" \
  -v /mnt/models:/mnt/models \
  -v "$HOME/.cache/huggingface:/root/.cache/huggingface" \
  -e HF_TOKEN=$HF_TOKEN \
  nvcr.io/nvidia/tensorrt-llm/release:spark-single-gpu-dev \
  bash -c "
    git clone -b 0.35.0 --single-branch https://github.com/NVIDIA/Model-Optimizer.git /app/TensorRT-Model-Optimizer && \
    cd /app/TensorRT-Model-Optimizer && pip install -e '.[dev]' && \
    export ROOT_SAVE_PATH='/workspace/output_models' && \
    /app/TensorRT-Model-Optimizer/examples/llm_ptq/scripts/huggingface_example.sh \
    --model /mnt/models/Qwen2.5-Coder-7B \
    --quant nvfp4 \
    --tp 1 \
    --export_fmt hf
  "

$ ls -la ./output_models/
$ find ./output_models/ -name "*.bin" -o -name "*.safetensors" -o -name "config.json"
$ export MODEL_PATH="./output_models/saved_models_DeepSeek-R1-Distill-Llama-8B_nvfp4_hf/"
$ export MODEL_PATH="./output_models/saved_models_Qwen3-4B-Instruct-2507_nvfp4_hf/"
# 使用轉換完成的模型
$ docker run \
  -e HF_TOKEN=$HF_TOKEN \
  -v $HOME/.cache/huggingface/:/root/.cache/huggingface/ \
  -v "$MODEL_PATH:/workspace/model" \
  --rm -it --ulimit memlock=-1 --ulimit stack=67108864 \
  --gpus=all --ipc=host --network host \
  nvcr.io/nvidia/tensorrt-llm/release:spark-single-gpu-dev \
  bash -c '
    python examples/llm-api/quickstart_advanced.py \
      --model_dir /workspace/model/ \
      --prompt "Paris is great because" \
      --max_tokens 64
    '
# Serve the model with OpenAI-compatible API
$ docker run \
  -e HF_TOKEN=$HF_TOKEN \
  -v "$MODEL_PATH:/workspace/model" \
  --rm -it --ulimit memlock=-1 --ulimit stack=67108864 \
  --gpus=all --ipc=host --network host \
  nvcr.io/nvidia/tensorrt-llm/release:spark-single-gpu-dev \
  trtllm-serve /workspace/model \
    --backend pytorch \
    --max_batch_size 4 \
    --host 0.0.0.0 \
    --port 8000
# The "model" field should match the name reported by GET /v1/models (here, the served path)
$ curl -X POST http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "/workspace/model",
    "messages": [{"role": "user", "content": "What is artificial intelligence?"}],
    "max_tokens": 100,
    "temperature": 0.7,
    "stream": false
  }'

==================
# trtllm-serve with a local model
# (the inline comments record observed GPU memory usage, power draw, and timing at
#  different free_gpu_memory_fraction settings)
$ export MODEL_HANDLE="/mnt/models/gpt-oss-20b"    # 0.8:42.6GB 48W 16s | 0.4:26.1GB 48W 21s | 0.2:26.6GB 46W 17s
$ export MODEL_HANDLE="/mnt/models/gpt-oss-120b"   # 0.8:  72GB 50W 35s | 0.5:70.4GB 49W 40s | 0.4:68.7GB 49W 39s
$ docker run --name trtllm_llm_server --rm -it --gpus all --ipc host --network host \
  -e MODEL_HANDLE="$MODEL_HANDLE" \
  -v /mnt/models:/mnt/models \
  -v $HOME/.cache/huggingface/:/root/.cache/huggingface/ \
  nvcr.io/nvidia/tensorrt-llm/release:spark-single-gpu-dev \
  bash -c '
    export TIKTOKEN_ENCODINGS_BASE="/tmp/harmony-reqs" && \
    mkdir -p $TIKTOKEN_ENCODINGS_BASE && \
    wget -P $TIKTOKEN_ENCODINGS_BASE https://openaipublic.blob.core.windows.net/encodings/o200k_base.tiktoken && \
    wget -P $TIKTOKEN_ENCODINGS_BASE https://openaipublic.blob.core.windows.net/encodings/cl100k_base.tiktoken && \
    cat > /tmp/extra-llm-api-config.yml <<EOF
print_iter_log: false
kv_cache_config:
  dtype: "auto"
  free_gpu_memory_fraction: 0.5
cuda_graph_config:
  enable_padding: true
disable_overlap_scheduler: true
EOF

    trtllm-serve "$MODEL_HANDLE" \
      --max_batch_size 8 \
      --max_seq_len 65536 \
      --max_num_tokens 131072 \
      --trust_remote_code \
      --host 0.0.0.0 \
      --port 8000 \
      --extra_llm_api_options /tmp/extra-llm-api-config.yml
  '
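
# Once the server is up, the same OpenAI-compatible request as before works against the
# local model (a sketch; MODEL_HANDLE is the path exported above):
$ curl -s http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "'"$MODEL_HANDLE"'",
    "messages": [{"role": "user", "content": "Summarize what TensorRT-LLM does in one sentence."}],
    "max_tokens": 64
  }'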
