  1. # All rights `PaddleDetection` reserved
  2. #!/bin/bash
  3. model_dir=$1
  4. model_name=$2
  5. export img_dir="demo"
  6. export log_path="output_pipeline"
  7. echo "model_dir : ${model_dir}"
  8. echo "img_dir: ${img_dir}"
  9. # TODO: support batch size>1
  10. for use_mkldnn in "True" "False"; do
  11. for threads in "1" "6"; do
  12. echo "${model_name} ${model_dir}, use_mkldnn: ${use_mkldnn} threads: ${threads}"
  13. python deploy/python/infer.py \
  14. --model_dir=${model_dir} \
  15. --run_benchmark True \
  16. --enable_mkldnn=${use_mkldnn} \
  17. --device=CPU \
  18. --cpu_threads=${threads} \
  19. --image_dir=${img_dir} 2>&1 | tee ${log_path}/${model_name}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_bs1_infer.log
  20. done
  21. done
  22. for run_mode in "fluid" "trt_fp32" "trt_fp16"; do
  23. echo "${model_name} ${model_dir}, run_mode: ${run_mode}"
  24. python deploy/python/infer.py \
  25. --model_dir=${model_dir} \
  26. --run_benchmark=True \
  27. --device=GPU \
  28. --run_mode=${run_mode} \
  29. --image_dir=${img_dir} 2>&1 | tee ${log_path}/${model_name}_gpu_runmode_${run_mode}_bs1_infer.log
  30. done