Update 2022-05-17: this is copied from the official Dockerfile (a straight copy will not start; the models, the model directory names, the actual serving port, etc. need to be changed). A working deployment example is given below.
Building the image
# Version: 2.3.0
FROM registry.baidubce.com/paddlepaddle/paddle:2.3.0
# PaddleOCR base on Python3.7
RUN pip3.7 install --upgrade pip -i https://mirror.baidu.com/pypi/simple
RUN pip3.7 install paddlehub --upgrade -i https://mirror.baidu.com/pypi/simple
RUN git clone https://github.com/PaddlePaddle/PaddleOCR.git /PaddleOCR
WORKDIR /PaddleOCR
RUN pip3.7 install -r requirements.txt -i https://mirror.baidu.com/pypi/simple
RUN mkdir -p /PaddleOCR/inference/
ENV det_model_link=https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar
ENV det_model_file=ch_ppocr_server_v2.0_det_infer
ENV classify_model_link=https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar
ENV classify_model_file=ch_ppocr_mobile_v2.0_cls_infer
ENV rec_model_link=https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar
ENV rec_model_file=ch_ppocr_server_v2.0_rec_infer
# Download the OCR detection model (server version here; to switch to the light version, change ch_ppocr_server_v2.0_det_infer to ch_ppocr_mobile_v2.0_det_infer, and remember to keep det_model_dir in deploy/hubserving/ocr_system/params.py consistent with the directory name used below)
ADD ${det_model_link} /PaddleOCR/inference/
RUN tar xf /PaddleOCR/inference/${det_model_file}.tar -C /PaddleOCR/inference/ \
&& mv /PaddleOCR/inference/${det_model_file} /PaddleOCR/inference/ch_PP-OCRv3_det_infer
# Download the text direction classifier (mobile version); remember that cls_model_dir in deploy/hubserving/ocr_system/params.py must match the extracted directory name
ADD ${classify_model_link} /PaddleOCR/inference/
RUN tar xf /PaddleOCR/inference/${classify_model_file}.tar -C /PaddleOCR/inference/
# rename not needed: the tar already extracts to ch_ppocr_mobile_v2.0_cls_infer
# && mv /PaddleOCR/inference/${classify_model_file} /PaddleOCR/inference/ch_ppocr_mobile_v2.0_cls_infer
# Download the OCR recognition model (server version here; to switch to the light version, change ch_ppocr_server_v2.0_rec_infer to ch_ppocr_mobile_v2.0_rec_infer, and remember to keep rec_model_dir in deploy/hubserving/ocr_system/params.py consistent with the directory name used below)
ADD ${rec_model_link} /PaddleOCR/inference/
RUN tar xf /PaddleOCR/inference/${rec_model_file}.tar -C /PaddleOCR/inference/ \
&& mv /PaddleOCR/inference/${rec_model_file} /PaddleOCR/inference/ch_PP-OCRv3_rec_infer
EXPOSE 8866
CMD ["/bin/bash","-c","hub install deploy/hubserving/ocr_system/ && hub serving start -m ocr_system"]
Usage
# 1. Build the image (CPU)
docker build -t paddleocr:cpu .
# 2. Start a container from the image; make sure host port 8869 is not already in use
docker run -dp 8869:8866 --name paddle_ocr paddleocr:cpu
# Check the container logs (a successful start prints the service address and port)
docker logs -f paddle_ocr
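Once the logs show the service is up, the ocr_system module can be exercised over HTTP through the mapped host port. Below is a minimal request sketch, assuming a local test image named test.jpg and GNU base64; the /predict/ocr_system route and the {"images": [<base64>]} payload follow the hubserving documentation, so adjust them if your branch differs. The repository's tools/test_hubserving.py script can be used for the same check.
# send one base64-encoded image to the running service (host port 8869 as mapped above)
curl -s -X POST http://127.0.0.1:8869/predict/ocr_system \
     -H "Content-Type: application/json" \
     -d '{"images": ["'"$(base64 -w 0 test.jpg)"'"]}'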