test_serving_infer_python.sh

#!/bin/bash
source test_tipc/utils_func.sh

FILENAME=$1
MODE="serving_infer"

# parse model_name
dataline=$(cat ${FILENAME})
IFS=$'\n'
lines=(${dataline})
model_name=$(func_parser_value "${lines[1]}")
echo "ppdet serving_python_infer: ${model_name}"
python=$(func_parser_value "${lines[2]}")
filename_key=$(func_parser_key "${lines[3]}")
filename_value=$(func_parser_value "${lines[3]}")
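
# The config file is read positionally: ${lines[N]} is the (N+1)-th line of
# the file. A minimal sketch of the assumed layout (values are illustrative;
# the real files live under test_tipc/configs/):
#   ===========================serving_params===========================
#   model_name:yolov3_darknet53_270e_coco
#   python:python3.7
#   filename:null
#   ...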
# parse export params
save_export_key=$(func_parser_key "${lines[5]}")
save_export_value=$(func_parser_value "${lines[5]}")
export_weight_key=$(func_parser_key "${lines[6]}")
export_weight_value=$(func_parser_value "${lines[6]}")
norm_export=$(func_parser_value "${lines[7]}")
pact_export=$(func_parser_value "${lines[8]}")
fpgm_export=$(func_parser_value "${lines[9]}")
distill_export=$(func_parser_value "${lines[10]}")
export_key1=$(func_parser_key "${lines[11]}")
export_value1=$(func_parser_value "${lines[11]}")
export_key2=$(func_parser_key "${lines[12]}")
export_value2=$(func_parser_value "${lines[12]}")
kl_quant_export=$(func_parser_value "${lines[13]}")
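
# func_parser_key / func_parser_value are defined in test_tipc/utils_func.sh.
# A minimal sketch of the assumed behavior (each config line is split on the
# first ':'):
#   line="save_dir:./output_inference"
#   echo "${line}" | cut -d ":" -f 1    # key   -> save_dir
#   echo "${line}" | cut -d ":" -f 2-   # value -> ./output_inference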
# parse serving params
infer_mode_list=$(func_parser_value "${lines[15]}")
infer_is_quant_list=$(func_parser_value "${lines[16]}")
web_service_py=$(func_parser_value "${lines[17]}")
model_dir_key=$(func_parser_key "${lines[18]}")
opt_key=$(func_parser_key "${lines[19]}")
opt_use_gpu_list=$(func_parser_value "${lines[19]}")
web_service_key1=$(func_parser_key "${lines[20]}")
web_service_value1=$(func_parser_value "${lines[20]}")
http_client_py=$(func_parser_value "${lines[21]}")
infer_image_key=$(func_parser_key "${lines[22]}")
infer_image_value=$(func_parser_value "${lines[22]}")
http_client_key1=$(func_parser_key "${lines[23]}")
http_client_value1=$(func_parser_value "${lines[23]}")

LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_serving_python.log"
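
# func_set_params (also from test_tipc/utils_func.sh) is assumed to expand a
# key/value pair into one CLI argument, and to nothing when the value is
# "null", e.g.:
#   func_set_params "--img_dir" "./demo"   # -> --img_dir=./demo
#   func_set_params "--img_dir" "null"     # -> (empty string)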
function func_serving_inference(){
    IFS='|'
    _python=$1
    _log_path=$2
    _service_script=$3
    _client_script=$4
    _set_model_dir=$5
    _set_image_file=$6
    set_web_service_params1=$(func_set_params "${web_service_key1}" "${web_service_value1}")
    set_http_client_params1=$(func_set_params "${http_client_key1}" "${http_client_value1}")
    # inference: loop over the device options (e.g. cpu/gpu)
    for opt in ${opt_use_gpu_list[*]}; do
        device_type=$(func_parser_key "${opt}")
        server_log_path="${_log_path}/python_server_${device_type}.log"
        client_log_path="${_log_path}/python_client_${device_type}.log"
        opt_value=$(func_parser_value "${opt}")
        _set_opt=$(func_set_params "${opt_key}" "${opt_value}")
        # launch the web service in the background
        web_service_cmd="${_python} ${_service_script} ${_set_model_dir} ${_set_opt} ${set_web_service_params1} > ${server_log_path} 2>&1 &"
        eval $web_service_cmd
        last_status=${PIPESTATUS[0]}
        cat ${server_log_path}
        status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
        sleep 5s
        # run the http client against the service
        http_client_cmd="${_python} ${_client_script} ${_set_image_file} ${set_http_client_params1} > ${client_log_path} 2>&1"
        eval $http_client_cmd
        last_status=${PIPESTATUS[0]}
        cat ${client_log_path}
        status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}" "${client_log_path}"
        # stop the web service (grep -v grep avoids matching the grep itself)
        ps ux | grep -E 'web_service' | grep -v grep | awk '{print $2}' | xargs kill -s 9
        sleep 2s
    done
}
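
# Note: the name-based kill above is coarse. A more targeted shutdown could
# record the server PID when the service is launched (a sketch, not what this
# script currently does):
#   eval $web_service_cmd
#   server_pid=$!        # PID of the backgrounded web service
#   ...
#   kill -9 ${server_pid}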
# set cuda device
GPUID=$3
if [ ${#GPUID} -le 0 ]; then
    env="export CUDA_VISIBLE_DEVICES=0"
else
    env="export CUDA_VISIBLE_DEVICES=${GPUID}"
fi
eval $env
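
# Example invocation (the config path is illustrative; $2 is not read by this
# script but is conventionally the test mode):
#   bash test_tipc/test_serving_infer_python.sh \
#       test_tipc/configs/yolov3/serving_infer_python.txt serving_infer 0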
# run serving infer
Count=0
IFS="|"
infer_quant_flag=(${infer_is_quant_list})
for infer_mode in ${infer_mode_list[*]}; do
    if [ ${infer_mode} != "null" ]; then
        # select the export command for this mode, then run export
        case ${infer_mode} in
            norm) run_export=${norm_export} ;;
            quant) run_export=${pact_export} ;;
            fpgm) run_export=${fpgm_export} ;;
            distill) run_export=${distill_export} ;;
            kl_quant) run_export=${kl_quant_export} ;;
            *) echo "Undefined infer_mode!"; exit 1 ;;
        esac
        set_export_weight=$(func_set_params "${export_weight_key}" "${export_weight_value}")
        set_save_export_dir=$(func_set_params "${save_export_key}" "${save_export_value}")
        set_filename=$(func_set_params "${filename_key}" "${model_name}")
        export_log_path="${LOG_PATH}/export.log"
        export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir}"
        echo $export_cmd
        eval "${export_cmd} > ${export_log_path} 2>&1"
        status_export=$?
        cat ${export_log_path}
        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
    fi
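
    # The serving model dir passed below is ${save_export_value}/${model_name}.
    # A typical Paddle export layout (assumed, for illustration):
    #   ${save_export_value}/${model_name}/
    #       infer_cfg.yml  model.pdmodel  model.pdiparams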
    # run inference
    set_export_model_dir=$(func_set_params "${model_dir_key}" "${save_export_value}/${model_name}")
    set_infer_image_file=$(func_set_params "${infer_image_key}" "${infer_image_value}")
    is_quant=${infer_quant_flag[Count]}  # parsed but not consumed below
    func_serving_inference "${python}" "${LOG_PATH}" "${web_service_py}" "${http_client_py}" "${set_export_model_dir}" "${set_infer_image_file}"
    Count=$(($Count + 1))
done

eval "unset CUDA_VISIBLE_DEVICES"