1. Server-Side Deployment
1.1 Writing the Dockerfile
FROM registry.baidubce.com/paddlepaddle/serving:0.9.0-cuda10.1-cudnn7-devel
COPY . /deploy
WORKDIR /deploy
# Install requirements
RUN pip config set global.index-url https://mirror.baidu.com/pypi/simple \
&& python3.7 -m pip install --upgrade setuptools \
&& python3.7 -m pip install --upgrade pip \
&& python3.7 -m pip install paddlepaddle-gpu==2.3.0.post101 \
-f https://www.paddlepaddle.org.cn/whl/linux/mkl/avx/stable.html \
&& pip3.7 install -r requirements.txt
ENTRYPOINT python3.7 web_service.py
1.2 Commands to build the image and start the container
nvidia-docker build -t xx/ocr-python-server-qa:1.0.0.0630 .
nvidia-docker run -p 18091:18091 --name ocr-python-server-qa -d xx/ocr-python-server-qa:1.0.0.0630
nvidia-docker exec -it ocr-python-server-qa /bin/bash
docker logs -f ocr-python-server-qa
1.3 Configuration file (config.yml)
# rpc port. rpc_port and http_port must not both be empty; when rpc_port is empty
# and http_port is not, rpc_port is automatically set to http_port + 1.
rpc_port: 18091
# http port. rpc_port and http_port must not both be empty; when rpc_port is usable
# and http_port is empty, no http_port is generated automatically.
http_port: 9998
# worker_num: maximum concurrency.
# When build_dag_each_worker=True, the framework creates worker_num processes, each
# building its own gRPC server and DAG.
# When build_dag_each_worker=False, the framework sets max_workers=worker_num for the
# main thread's gRPC thread pool.
worker_num: 10
# build_dag_each_worker: False builds a single DAG inside the process;
# True builds an independent DAG in every worker process.
build_dag_each_worker: False

dag:
    # op resource type: True for the thread model, False for the process model
    is_thread_op: False
    # number of retries
    retry: 10
    # profiling: True generates Timeline performance data (at some performance cost); False disables it
    use_profile: True
    tracer:
        interval_s: 10

op:
    det:
        # concurrency: thread-level when is_thread_op=True, otherwise process-level
        concurrency: 2
        # when the op has no server_endpoints, the local service configuration is read from local_service_conf
        local_service_conf:
            # client type: brpc, grpc or local_predictor; local_predictor predicts in-process without starting a Serving service
            client_type: local_predictor
            # det model path
            model_config: ./ppocr_det_v3_serving
            # fetch list, using the alias_name of fetch_var in client_config; if unset, all output variables are fetched
            #fetch_list: ["sigmoid_0.tmp_0"]
            # device IDs: "" or unset means CPU inference; "0" or "0,1,2" means GPU inference on the listed GPU cards
            devices: "0"
            ir_optim: True
    rec:
        # concurrency: thread-level when is_thread_op=True, otherwise process-level
        concurrency: 1
        # timeout in ms
        timeout: -1
        # number of retries when calling Serving; no retry by default
        retry: 1
        # when the op has no server_endpoints, the local service configuration is read from local_service_conf
        local_service_conf:
            # client type: brpc, grpc or local_predictor; local_predictor predicts in-process without starting a Serving service
            client_type: local_predictor
            # rec model path
            model_config: ./ppocr_rec_v3_serving
            # fetch list, using the alias_name of fetch_var in client_config; if unset, all output variables are fetched
            #fetch_list:
            # device IDs: "" or unset means CPU inference; "0" or "0,1,2" means GPU inference on the listed GPU cards
            devices: "0"
            ir_optim: True
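With these ports, the running pipeline can be smoke-tested over HTTP. The snippet below is a minimal sketch, not part of the deployment itself: it assumes the standard PaddleServing pipeline JSON request format ({"key": [...], "value": [...]}), the service name ocr set in the startup file in 1.4, and that http_port 9998 is reachable (the docker run command in 1.2 only publishes 18091, so either add -p 9998:9998 or run the test from inside the container). The test image path is the same "./11.jpg" used by the client script in 2.3.

# HTTP smoke test for the pipeline service (sketch; assumes the standard
# {"key": [...], "value": [...]} pipeline request format and service name "ocr")
import base64
import json
import urllib.request

with open("./11.jpg", "rb") as f:          # any local test image
    img_b64 = base64.b64encode(f.read()).decode("utf8")

payload = json.dumps({"key": ["image"], "value": [img_b64]}).encode("utf8")
req = urllib.request.Request(
    "http://127.0.0.1:9998/ocr/prediction",
    data=payload,
    headers={"Content-Type": "application/json"})
with urllib.request.urlopen(req) as resp:
    print(json.loads(resp.read()))         # pipeline response as a dict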
1.4 Startup file (web_service.py)
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle_serving_server.web_service import WebService, Op
import logging
import numpy as np
import copy
import cv2
import base64
# from paddle_serving_app.reader import OCRReader
from ocr_reader import OCRReader, DetResizeForTest
from paddle_serving_app.reader import Sequential, ResizeByFactor
from paddle_serving_app.reader import Div, Normalize, Transpose
from paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes
_LOGGER = logging.getLogger()
class DetOp(Op):
    def init_op(self):
        self.det_preprocess = Sequential([
            DetResizeForTest(), Div(255),
            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            Transpose((2, 0, 1))
        ])
        self.filter_func = FilterBoxes(10, 10)
        self.post_func = DBPostProcess({
            "thresh": 0.3,
            "box_thresh": 0.6,
            "max_candidates": 1000,
            "unclip_ratio": 1.5,
            "min_size": 3
        })

    def preprocess(self, input_dicts, data_id, log_id):
        (_, input_dict), = input_dicts.items()
        # decode the base64-encoded image sent by the client
        data = base64.b64decode(input_dict["image"].encode('utf8'))
        self.raw_im = data
        data = np.frombuffer(data, np.uint8)
        # Note: class variables (self.var) can only be used in process op mode
        im = cv2.imdecode(data, cv2.IMREAD_COLOR)
        self.ori_h, self.ori_w, _ = im.shape
        det_img = self.det_preprocess(im)
        _, self.new_h, self.new_w = det_img.shape
        return {"x": det_img[np.newaxis, :].copy()}, False, None, ""

    def postprocess(self, input_dicts, fetch_dict, data_id, log_id):
        det_out = list(fetch_dict.values())[0]
        ratio_list = [
            float(self.new_h) / self.ori_h, float(self.new_w) / self.ori_w
        ]
        # DB postprocess turns the raw detection output into text boxes
        dt_boxes_list = self.post_func(det_out, [ratio_list])
        dt_boxes = self.filter_func(dt_boxes_list[0], [self.ori_h, self.ori_w])
        out_dict = {"dt_boxes": dt_boxes, "image": self.raw_im}
        return out_dict, None, ""


class RecOp(Op):
    def init_op(self):
        self.ocr_reader = OCRReader(
            char_dict_path="./ppocr_keys_v1.txt")
        self.get_rotate_crop_image = GetRotateCropImage()
        self.sorted_boxes = SortedBoxes()

    def preprocess(self, input_dicts, data_id, log_id):
        (_, input_dict), = input_dicts.items()
        raw_im = input_dict["image"]
        data = np.frombuffer(raw_im, np.uint8)
        im = cv2.imdecode(data, cv2.IMREAD_COLOR)
        self.dt_list = input_dict["dt_boxes"]
        self.dt_list = self.sorted_boxes(self.dt_list)
        # deepcopy to save origin dt_boxes
        dt_boxes = copy.deepcopy(self.dt_list)
        feed_list = []
        img_list = []
        max_wh_ratio = 320 / 48.
        ## Many mini-batchs, the type of feed_data is list.
        max_batch_size = 6  # len(dt_boxes)

        # If max_batch_size is 0, skipping predict stage
        if max_batch_size == 0:
            return {}, True, None, ""
        boxes_size = len(dt_boxes)
        batch_size = boxes_size // max_batch_size
        rem = boxes_size % max_batch_size
        for bt_idx in range(0, batch_size + 1):
            imgs = None
            boxes_num_in_one_batch = 0
            if bt_idx == batch_size:
                if rem == 0:
                    continue
                else:
                    boxes_num_in_one_batch = rem
            elif bt_idx < batch_size:
                boxes_num_in_one_batch = max_batch_size
            else:
                _LOGGER.error("batch_size error, bt_idx={}, batch_size={}".
                              format(bt_idx, batch_size))
                break

            start = bt_idx * max_batch_size
            end = start + boxes_num_in_one_batch
            img_list = []
            for box_idx in range(start, end):
                boximg = self.get_rotate_crop_image(im, dt_boxes[box_idx])
                img_list.append(boximg)
                h, w = boximg.shape[0:2]
                wh_ratio = w * 1.0 / h
                max_wh_ratio = max(max_wh_ratio, wh_ratio)
            _, w, h = self.ocr_reader.resize_norm_img(img_list[0],
                                                      max_wh_ratio).shape
            imgs = np.zeros((boxes_num_in_one_batch, 3, w, h)).astype('float32')
            for id, img in enumerate(img_list):
                norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
                imgs[id] = norm_img
            feed = {"x": imgs.copy()}
            feed_list.append(feed)
        return feed_list, False, None, ""

    def postprocess(self, input_dicts, fetch_data, data_id, log_id):
        rec_list = []
        dt_num = len(self.dt_list)
        if isinstance(fetch_data, dict):
            if len(fetch_data) > 0:
                rec_batch_res = self.ocr_reader.postprocess(
                    fetch_data, with_score=True)
                for res in rec_batch_res:
                    rec_list.append(res)
        elif isinstance(fetch_data, list):
            for one_batch in fetch_data:
                one_batch_res = self.ocr_reader.postprocess(
                    one_batch, with_score=True)
                for res in one_batch_res:
                    rec_list.append(res)
        result_list = []
        for i in range(dt_num):
            text = rec_list[i]
            dt_box = self.dt_list[i]
            # keep only results with a recognition score of at least 0.5
            if text[1] >= 0.5:
                result_list.append([text, dt_box.tolist()])
        res = {"result": str(result_list)}
        return res, None, ""


class OcrService(WebService):
    def get_pipeline_response(self, read_op):
        det_op = DetOp(name="det", input_ops=[read_op])
        rec_op = RecOp(name="rec", input_ops=[det_op])
        return rec_op


uci_service = OcrService(name="ocr")
uci_service.prepare_pipeline_config("config.yml")
uci_service.run_service()
1.5 ppocr_keys_v1 dictionary download link
1.6 Required packages
paddle-serving-client==0.9.0
paddle-serving-app==0.9.0
paddle-serving-server-gpu==0.9.0.post101
1.7 ocr_reader.py
Reference: official download link
2. Client-Side Deployment
2.1 Writing the Dockerfile
FROM registry.baidubce.com/paddlepaddle/serving:0.9.0-cuda10.1-cudnn7-devel
COPY . /deploy
WORKDIR /deploy
RUN pip config set global.index-url https://mirror.baidu.com/pypi/simple \
&& pip3.7 install --upgrade setuptools \
&& pip3.7 install --upgrade pip \
&& pip3.7 install -r requirements.txt
EXPOSE 9539
ENTRYPOINT ["gunicorn", "-c", "gunicorn_cfg.py", "main_api:app"]
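The ENTRYPOINT references gunicorn_cfg.py, which is not shown above. A minimal sketch of such a file is given below; only the port is fixed by the EXPOSE/-p settings, while the worker count, gevent worker class and timeout are assumptions based on the package list in 2.4 and should be tuned to your environment.

# gunicorn_cfg.py (sketch; values other than the port are assumptions)
bind = "0.0.0.0:9539"      # must match the EXPOSE / -p port
workers = 4                # number of worker processes
worker_class = "gevent"    # gevent is listed in the client requirements
timeout = 120              # seconds before an unresponsive worker is restarted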
2.2 Commands to build the image and start the container
nvidia-docker build -t xx/ocr-python-client-qa:1.0.0.0630 .
nvidia-docker run -p 9539:9539 --name ocr-python-client-qa -d xx/ocr-python-client-qa:1.0.0.0630
nvidia-docker exec -it ocr-python-client-qa /bin/bash
2.3 pipeline_rpc_client.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
    from paddle_serving_server_gpu.pipeline import PipelineClient
except ImportError:
    from paddle_serving_server.pipeline import PipelineClient
import base64
import re

from config_env.config_envi import common


class ImageOCR(object):
    def __init__(self):
        self.client = PipelineClient()
        self.client.connect([common['model_ip_port_server']])
        # ID-card field markers used to slice the recognized text:
        # name, sex, ethnicity, date of birth, address, citizen ID number,
        # plus '$' as an end-of-string sentinel for the regex below
        self.index = ['姓名', '性别', '民族', '出生', '住址', '公民身份号码', '$']

    def cv2_to_base64(self, image):
        return base64.b64encode(image).decode('utf8')

    def ocr_image(self, img_path):
        with open(img_path, 'rb') as file:
            image_data = file.read()
        image = self.cv2_to_base64(image_data)
        ret = self.client.predict(feed_dict={"image": image}, fetch=["res"])
        res_new = eval(ret.value[0])
        res_list = []
        for txt in res_new:
            res_list.append(txt[0][0])
        # swap the first two recognized text lines
        res_list[0], res_list[1] = res_list[1], res_list[0]
        res_str = ''.join(res_list)
        res_dict = {}
        for i in range(len(self.index) - 1):
            re_res = re.search(r'%s(.*?)%s' % (self.index[i], self.index[i + 1]), res_str)
            # print(re_res)
            res_dict[self.index[i]] = re.sub(r'[\s。]', '', re_res.group(1)) if re_res else ''
        return res_dict


if __name__ == '__main__':
    image_ocr = ImageOCR()
    test_img_path_ = "./11.jpg"
    print(image_ocr.ocr_image(test_img_path_))
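pipeline_rpc_client.py reads the server address from config_env/config_envi.py, which is not included above. A minimal sketch of that module follows; the dict key comes from the import in the script, while the address and port value are an assumption based on the rpc_port 18091 configured for the server in section 1 (adjust it to the server's actual host). An empty config_env/__init__.py is also needed so the package can be imported.

# config_env/config_envi.py (sketch; the address value is an assumption)
common = {
    # gRPC address of the pipeline server (rpc_port 18091 in config.yml)
    'model_ip_port_server': '127.0.0.1:18091',
}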
2.4 Required packages
paddle-serving-client==0.9.0
paddle-serving-app==0.9.0
paddle-serving-server-gpu==0.9.0.post101
Flask-RESTful==0.3.9
Flask==1.1.4
flask_cors==3.0.10
prometheus-flask-exporter==0.18.7
gunicorn==20.1.0
gevent==21.12.0
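The ENTRYPOINT in 2.1 serves main_api:app with gunicorn, but main_api.py itself is not shown here. The sketch below only illustrates how the pieces could fit together; the route name, upload field and temp-file handling are hypothetical, while the imported packages come from the list above and ImageOCR comes from pipeline_rpc_client.py in 2.3.

# main_api.py (sketch; route name, field name and temp-file handling are
# hypothetical, only the imported packages come from the requirements above)
import tempfile

from flask import Flask, request
from flask_cors import CORS
from flask_restful import Api, Resource
from prometheus_flask_exporter import PrometheusMetrics

from pipeline_rpc_client import ImageOCR

app = Flask(__name__)
CORS(app)                          # allow cross-origin calls
PrometheusMetrics(app)             # expose /metrics for Prometheus
api = Api(app)
image_ocr = ImageOCR()             # one RPC client per worker process


class OcrApi(Resource):
    def post(self):
        # save the uploaded image to a temp file, then run OCR on it
        uploaded = request.files['image']
        with tempfile.NamedTemporaryFile(suffix='.jpg') as tmp:
            uploaded.save(tmp.name)
            return image_ocr.ocr_image(tmp.name)


api.add_resource(OcrApi, '/ocr')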
Other references: earlier articles in this series
Part 1: OCR - deploying the latest PaddleServing and PaddleOCR with Docker (server, client, Dockerfile deployment)
Part 2: OCR - deploying the latest PaddleServing and PaddleOCR with Docker (server, client, Dockerfile deployment), C++ deployment
Part 3: OCR - deploying the latest PaddleServing and PaddleOCR with Docker (server, client, Dockerfile deployment), hubserving deployment on GPU