diff --git a/.github/configs/amd-master.yaml b/.github/configs/amd-master.yaml
index b9560803e..2ea215fbe 100644
--- a/.github/configs/amd-master.yaml
+++ b/.github/configs/amd-master.yaml
@@ -293,6 +293,28 @@ glm5-fp8-mi355x-sglang:
       search-space:
         - { tp: 8, conc-start: 4, conc-end: 64 }

+glm5-fp8-mi355x-atom:
+  image: TBD
+  model: zai-org/GLM-5-FP8
+  model-prefix: glm5
+  runner: mi355x
+  precision: fp8
+  framework: atom
+  multinode: false
+  seq-len-configs:
+    - isl: 1024
+      osl: 1024
+      search-space:
+        - { tp: 8, conc-start: 4, conc-end: 128 }
+    - isl: 1024
+      osl: 8192
+      search-space:
+        - { tp: 8, conc-start: 4, conc-end: 128 }
+    - isl: 8192
+      osl: 1024
+      search-space:
+        - { tp: 8, conc-start: 4, conc-end: 128 }
+
 kimik2.5-int4-mi355x-vllm:
   image: vllm/vllm-openai-rocm:v0.18.0
   model: moonshotai/Kimi-K2.5
@@ -363,6 +385,31 @@ kimik2.5-fp4-mi355x-vllm:
         - { tp: 8, conc-start: 4, conc-end: 64 }
         - { tp: 4, conc-start: 4, conc-end: 64 }

+kimik2.5-fp4-mi355x-atom:
+  image: TBD
+  model: amd/Kimi-K2.5-MXFP4
+  model-prefix: kimik2.5
+  runner: mi355x
+  precision: fp4
+  framework: atom
+  multinode: false
+  seq-len-configs:
+    - isl: 1024
+      osl: 1024
+      search-space:
+        - { tp: 8, conc-start: 4, conc-end: 128 }
+        - { tp: 4, conc-start: 4, conc-end: 128 }
+    - isl: 1024
+      osl: 8192
+      search-space:
+        - { tp: 8, conc-start: 4, conc-end: 128 }
+        - { tp: 4, conc-start: 4, conc-end: 128 }
+    - isl: 8192
+      osl: 1024
+      search-space:
+        - { tp: 8, conc-start: 4, conc-end: 128 }
+        - { tp: 4, conc-start: 4, conc-end: 128 }
+
 minimaxm2.5-fp8-mi355x-vllm:
   image: vllm/vllm-openai-rocm:v0.18.0
   model: MiniMaxAI/MiniMax-M2.5
@@ -391,6 +438,34 @@ minimaxm2.5-fp8-mi355x-vllm:
         - { tp: 4, conc-start: 4, conc-end: 64 }
         - { tp: 8, ep: 8, conc-start: 32, conc-end: 256 }

+minimaxm2.5-fp8-mi355x-atom:
+  image: TBD
+  model: MiniMaxAI/MiniMax-M2.5
+  model-prefix: minimaxm2.5
+  runner: mi355x
+  precision: fp8
+  framework: atom
+  multinode: false
+  seq-len-configs:
+    - isl: 1024
+      osl: 1024
+      search-space:
+        - { tp: 2, conc-start: 4, conc-end: 128 }
+        - { tp: 4, conc-start: 4, conc-end: 128 }
+        - { tp: 8, ep: 8, conc-start: 32, conc-end: 256 }
+    - isl: 1024
+      osl: 8192
+      search-space:
+        - { tp: 2, conc-start: 4, conc-end: 128 }
+        - { tp: 4, conc-start: 4, conc-end: 128 }
+        - { tp: 8, ep: 8, conc-start: 32, conc-end: 256 }
+    - isl: 8192
+      osl: 1024
+      search-space:
+        - { tp: 2, conc-start: 4, conc-end: 128 }
+        - { tp: 4, conc-start: 4, conc-end: 128 }
+        - { tp: 8, ep: 8, conc-start: 32, conc-end: 256 }
+
 minimaxm2.5-fp8-mi300x-vllm:
   image: vllm/vllm-openai-rocm:v0.16.0
   model: MiniMaxAI/MiniMax-M2.5
diff --git a/benchmarks/single_node/glm5_fp8_mi355x_atom.sh b/benchmarks/single_node/glm5_fp8_mi355x_atom.sh
new file mode 100644
index 000000000..960363bb6
--- /dev/null
+++ b/benchmarks/single_node/glm5_fp8_mi355x_atom.sh
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+
+source "$(dirname "$0")/../benchmark_lib.sh"
+
+check_env_vars \
+    MODEL \
+    TP \
+    CONC \
+    ISL \
+    OSL \
+    RANDOM_RANGE_RATIO \
+    RESULT_FILENAME \
+    EP_SIZE \
+    DP_ATTENTION
+
+if [[ -n "$SLURM_JOB_ID" ]]; then
+    echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
+fi
+
+echo "TP: $TP, CONC: $CONC, ISL: $ISL, OSL: $OSL, EP_SIZE: $EP_SIZE, DP_ATTENTION: $DP_ATTENTION"
+
+SERVER_LOG=/workspace/server.log
+PORT=${PORT:-8888}
+
+export OMP_NUM_THREADS=1
+
+# Calculate max-model-len based on ISL and OSL
+if [ "$ISL" = "1024" ] && [ "$OSL" = "1024" ]; then
+    CALCULATED_MAX_MODEL_LEN=""
+else
+    CALCULATED_MAX_MODEL_LEN=" --max-model-len 10240 "
+fi
+
+if [ "$EP_SIZE" -gt 1 ]; then
+    EP=" --enable-expert-parallel"
+else
+    EP=" "
+fi
+
+# Start GPU monitoring (power, temperature, clocks every second)
+start_gpu_monitor
+
+set -x
+
+python3 -m atom.entrypoints.openai_server \
+    --model $MODEL \
+    --server-port $PORT \
+    -tp $TP \
+    --kv_cache_dtype fp8 $CALCULATED_MAX_MODEL_LEN $EP \
+    --trust-remote-code \
+    > $SERVER_LOG 2>&1 &
+
+SERVER_PID=$!
+
+# Wait for server to be ready
+wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"
+
+export PYTHONDONTWRITEBYTECODE=1
+run_benchmark_serving \
+    --model "$MODEL" \
+    --port "$PORT" \
+    --backend vllm \
+    --input-len "$ISL" \
+    --output-len "$OSL" \
+    --random-range-ratio "$RANDOM_RANGE_RATIO" \
+    --num-prompts "$((CONC * 10))" \
+    --max-concurrency "$CONC" \
+    --result-filename "$RESULT_FILENAME" \
+    --result-dir /workspace/
+
+# After throughput, run evaluation only if RUN_EVAL is true
+if [ "${RUN_EVAL}" = "true" ]; then
+    run_eval --framework lm-eval --port "$PORT" --concurrent-requests $CONC
+    append_lm_eval_summary
+fi
+
+# Stop GPU monitoring
+stop_gpu_monitor
+set +x
diff --git a/benchmarks/single_node/kimik2.5_fp4_mi355x_atom.sh b/benchmarks/single_node/kimik2.5_fp4_mi355x_atom.sh
new file mode 100644
index 000000000..8f94c3829
--- /dev/null
+++ b/benchmarks/single_node/kimik2.5_fp4_mi355x_atom.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+
+source "$(dirname "$0")/../benchmark_lib.sh"
+
+check_env_vars \
+    MODEL \
+    TP \
+    CONC \
+    ISL \
+    OSL \
+    RANDOM_RANGE_RATIO \
+    RESULT_FILENAME \
+    EP_SIZE \
+    DP_ATTENTION
+
+if [[ -n "$SLURM_JOB_ID" ]]; then
+    echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
+fi
+
+echo "TP: $TP, CONC: $CONC, ISL: $ISL, OSL: $OSL, EP_SIZE: $EP_SIZE, DP_ATTENTION: $DP_ATTENTION"
+
+SERVER_LOG=/workspace/server.log
+PORT=${PORT:-8888}
+
+export OMP_NUM_THREADS=1
+
+# Calculate max-model-len based on ISL and OSL
+if [ "$ISL" = "1024" ] && [ "$OSL" = "1024" ]; then
+    CALCULATED_MAX_MODEL_LEN=""
+else
+    CALCULATED_MAX_MODEL_LEN=" --max-model-len 10240 "
+fi
+
+if [ "$EP_SIZE" -gt 1 ]; then
+    EP=" --enable-expert-parallel"
+else
+    EP=" "
+fi
+
+# Start GPU monitoring (power, temperature, clocks every second)
+start_gpu_monitor
+
+set -x
+
+python3 -m atom.entrypoints.openai_server \
+    --model $MODEL \
+    --server-port $PORT \
+    -tp $TP \
+    --kv_cache_dtype fp8 $CALCULATED_MAX_MODEL_LEN $EP \
+    --trust-remote-code \
+    > $SERVER_LOG 2>&1 &
+
+SERVER_PID=$!
+
+# Wait for server to be ready
+wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"
+
+export PYTHONDONTWRITEBYTECODE=1
+run_benchmark_serving \
+    --model "$MODEL" \
+    --port "$PORT" \
+    --backend vllm \
+    --input-len "$ISL" \
+    --output-len "$OSL" \
+    --random-range-ratio "$RANDOM_RANGE_RATIO" \
+    --num-prompts "$((CONC * 10))" \
+    --max-concurrency "$CONC" \
+    --result-filename "$RESULT_FILENAME" \
+    --result-dir /workspace/ \
+    --trust-remote-code
+
+# After throughput, run evaluation only if RUN_EVAL is true
+if [ "${RUN_EVAL}" = "true" ]; then
+    run_eval --framework lm-eval --port "$PORT" --concurrent-requests $CONC
+    append_lm_eval_summary
+fi
+
+# Stop GPU monitoring
+stop_gpu_monitor
+set +x
diff --git a/benchmarks/single_node/minimaxm2.5_fp8_mi355x_atom.sh b/benchmarks/single_node/minimaxm2.5_fp8_mi355x_atom.sh
new file mode 100644
index 000000000..8f94c3829
--- /dev/null
+++ b/benchmarks/single_node/minimaxm2.5_fp8_mi355x_atom.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+
+source "$(dirname "$0")/../benchmark_lib.sh"
+
+check_env_vars \
+    MODEL \
+    TP \
+    CONC \
+    ISL \
+    OSL \
+    RANDOM_RANGE_RATIO \
+    RESULT_FILENAME \
+    EP_SIZE \
+    DP_ATTENTION
+
+if [[ -n "$SLURM_JOB_ID" ]]; then
+    echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
+fi
+
+echo "TP: $TP, CONC: $CONC, ISL: $ISL, OSL: $OSL, EP_SIZE: $EP_SIZE, DP_ATTENTION: $DP_ATTENTION"
+
+SERVER_LOG=/workspace/server.log
+PORT=${PORT:-8888}
+
+export OMP_NUM_THREADS=1
+
+# Calculate max-model-len based on ISL and OSL
+if [ "$ISL" = "1024" ] && [ "$OSL" = "1024" ]; then
+    CALCULATED_MAX_MODEL_LEN=""
+else
+    CALCULATED_MAX_MODEL_LEN=" --max-model-len 10240 "
+fi
+
+if [ "$EP_SIZE" -gt 1 ]; then
+    EP=" --enable-expert-parallel"
+else
+    EP=" "
+fi
+
+# Start GPU monitoring (power, temperature, clocks every second)
+start_gpu_monitor
+
+set -x
+
+python3 -m atom.entrypoints.openai_server \
+    --model $MODEL \
+    --server-port $PORT \
+    -tp $TP \
+    --kv_cache_dtype fp8 $CALCULATED_MAX_MODEL_LEN $EP \
+    --trust-remote-code \
+    > $SERVER_LOG 2>&1 &
+
+SERVER_PID=$!
+
+# Wait for server to be ready
+wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"
+
+export PYTHONDONTWRITEBYTECODE=1
+run_benchmark_serving \
+    --model "$MODEL" \
+    --port "$PORT" \
+    --backend vllm \
+    --input-len "$ISL" \
+    --output-len "$OSL" \
+    --random-range-ratio "$RANDOM_RANGE_RATIO" \
+    --num-prompts "$((CONC * 10))" \
+    --max-concurrency "$CONC" \
+    --result-filename "$RESULT_FILENAME" \
+    --result-dir /workspace/ \
+    --trust-remote-code
+
+# After throughput, run evaluation only if RUN_EVAL is true
+if [ "${RUN_EVAL}" = "true" ]; then
+    run_eval --framework lm-eval --port "$PORT" --concurrent-requests $CONC
+    append_lm_eval_summary
+fi
+
+# Stop GPU monitoring
+stop_gpu_monitor
+set +x
diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index 6a8a6e666..610c4ef96 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -1108,3 +1108,12 @@
   description:
     - "Update vLLM image from v0.15.1 to v0.18.0 for gptoss H100 and H200 configs"
   pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/960
+
+- config-keys:
+    - kimik2.5-fp4-mi355x-atom
+    - glm5-fp8-mi355x-atom
+    - minimaxm2.5-fp8-mi355x-atom
+  description:
+    - "New model support on ATOM framework"
+    - "Kimi-K2.5 FP4, GLM-5 FP8, and MiniMax-M2.5 FP8 configs added for MI355X ATOM"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/963
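
Usage note: the three new ATOM scripts share the environment-variable interface enforced by check_env_vars. The sketch below is a hypothetical local invocation of the GLM-5 FP8 script; all values are illustrative, and it assumes the final ATOM container image (still TBD in the config) or an atom install is available and that benchmark_lib.sh resolves on the relative path the script sources.

    # Hypothetical example: GLM-5 FP8 ATOM benchmark at ISL/OSL 1024/1024, TP=8, concurrency 64.
    # EP_SIZE=1 keeps expert parallelism disabled; RUN_EVAL=false skips the lm-eval pass.
    MODEL=zai-org/GLM-5-FP8 \
    TP=8 CONC=64 ISL=1024 OSL=1024 \
    RANDOM_RANGE_RATIO=1.0 \
    RESULT_FILENAME=glm5_fp8_tp8_conc64 \
    EP_SIZE=1 DP_ATTENTION=0 RUN_EVAL=false \
    bash benchmarks/single_node/glm5_fp8_mi355x_atom.sh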