diff --git a/docker_run.sh b/docker_run.sh
index 7cbff37e2..3f572e591 100755
--- a/docker_run.sh
+++ b/docker_run.sh
@@ -30,5 +30,5 @@ docker run \
   $docker_devices \
   -v $HERE/share:/opt/ml-suite/share \
   -w /opt/ml-suite \
-  xilinx-ml-suite-ubuntu-16.04-xrt-2018.2-caffe-mls-1.4:latest \
+  durgabhavaniv/xilinx-ml_suite:latest \
   bash
diff --git a/docs/image_classifier_terminal_caffe.md b/docs/image_classifier_terminal_caffe.md
new file mode 100644
index 000000000..7efe18b24
--- /dev/null
+++ b/docs/image_classifier_terminal_caffe.md
@@ -0,0 +1,86 @@
+## Running an image classifier with a Caffe model in a Docker container
+
+1. Clone ML Suite
+
+   ```
+   git clone https://github.com/Xilinx/ml-suite.git
+   ```
+
+2. Download the ML Suite container
+
+   ```
+   https://www.xilinx.com/member/forms/download/eula-xef.html?filename=xilinx-ml-suite-ubuntu-16.04-xrt-2018.2-caffe-mls-1.4.tar.gz
+   ```
+
+3. Load the container
+
+   ```
+   sudo docker load < xilinx-ml-suite-ubuntu-16.04-xrt-2018.2-caffe-mls-1.4.tar.gz
+   ```
+
+4. Run the Docker container
+
+   ```
+   $ cd ml-suite
+   $ sudo ./docker_run.sh
+   ```
+
+5. One-time setup
+
+   ```
+   python -m ck pull repo:ck-env
+   python -m ck install package:imagenet-2012-val-min
+   python -m ck install package:imagenet-2012-aux
+   head -n 500 $HOME/CK-TOOLS/dataset-imagenet-ilsvrc2012-aux/val.txt > $HOME/CK-TOOLS/dataset-imagenet-ilsvrc2012-val-min/val_map.txt
+   ```
+
+   Resize all the images to a common dimension for Caffe
+   ```
+   python -m pip --no-cache-dir install opencv-python --user
+   python /opt/ml-suite/examples/caffe/resize.py $HOME/CK-TOOLS/dataset-imagenet-ilsvrc2012-val-min 256 256
+   ```
+
+   Get the necessary models
+   ```
+   cd /opt/ml-suite/examples/caffe
+   python getModels.py
+   ```
+
+6. Environment setup
+
+   ```
+   export MLSUITE_ROOT=/opt/ml-suite
+   source $MLSUITE_ROOT/overlaybins/setup.sh
+   ```
+
+7. **Quantize the model** - The quantizer generates the scaling parameters needed to quantize floats to INT8. This is required because the FPGA takes advantage of fixed-point precision to achieve more parallelism at lower power (a short conceptual sketch of this scaling is included at the end of this page).
+
+   ```
+   cd /opt/ml-suite/share
+   export DECENT_DEBUG=1
+   /opt/caffe/build/tools/decent_q quantize -model /opt/models/caffe/bvlc_googlenet/bvlc_googlenet_train_val.prototxt -weights /opt/models/caffe/bvlc_googlenet/bvlc_googlenet.caffemodel -auto_test -test_iter 1 --calib_iter 1
+   ```
+
+   This outputs quantize_info.txt, deploy.prototxt and deploy.caffemodel to the $HOME/share/quantize_results/ directory.
+
+8. **Compile the Model** - In this step, the network graph (prototxt) and the weights (caffemodel) are compiled.
+
+   ```
+   python $MLSUITE_ROOT/xfdnn/tools/compile/bin/xfdnn_compiler_caffe.pyc -b 1 -i 96 -m 9 -d 256 -mix --pipelineconvmaxpool --usedeephi --quant_cfgfile quantize_results/quantize_info.txt -n quantize_results/deploy.prototxt -w quantize_results/deploy.caffemodel -g work/compiler -qz work/quantizer -C
+   ```
+
+   This outputs compiler.json, quantizer.json and deploy.caffemodel_data.h5 to the $HOME/share/work/ directory.
+
+9. **Subgraph Cutting** - In this step, the original graph is cut, and a custom FPGA-accelerated Python layer is inserted to be used for inference.
+
+   ```
+   python $MLSUITE_ROOT/xfdnn/rt/scripts/framework/caffe/xfdnn_subgraph.py --inproto quantize_results/deploy.prototxt --trainproto /opt/models/caffe/bvlc_googlenet/bvlc_googlenet_train_val.prototxt --outproto xfdnn_auto_cut_deploy.prototxt --cutAfter data --xclbin $MLSUITE_ROOT/overlaybins/$MLSUITE_PLATFORM/overlay_4.xclbin --netcfg work/compiler.json --quantizecfg work/quantizer.json --weights work/deploy.caffemodel_data.h5 --profile True
+   ```
+
+   This outputs xfdnn_auto_cut_deploy.prototxt to the $HOME/share/ directory.
+
+10. Run image classification with the Caffe model
+
+    ```
+    python $MLSUITE_ROOT/examples/caffe/caffe_run.py --caffemodel $MLSUITE_ROOT/share/quantize_results/deploy.caffemodel --prototxt $MLSUITE_ROOT/share/xfdnn_auto_cut_deploy.prototxt --synset_words $MLSUITE_ROOT/examples/deployment_modes/synset_words.txt --image $MLSUITE_ROOT/examples/deployment_modes/dog.jpg
+    ```
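+
+As a rough mental model of the scaling parameters produced in step 7, the sketch below quantizes a float tensor to INT8 with a single symmetric scale factor. This is an illustration only; the calibration that decent_q actually performs is more involved.
+
+```
+import numpy as np
+
+def quantize_int8(x):
+    # One scale per tensor, derived from the largest-magnitude value.
+    scale = np.abs(x).max() / 127.0
+    q = np.clip(np.round(x / scale), -128, 127).astype(np.int8)
+    return q, scale
+
+def dequantize(q, scale):
+    # Approximate reconstruction of the original floats.
+    return q.astype(np.float32) * scale
+
+w = np.random.randn(64).astype(np.float32)   # stand-in for layer weights
+q, s = quantize_int8(w)
+print("scale:", s, "max abs error:", np.abs(dequantize(q, s) - w).max())
+```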
diff --git a/docs/img/image_classifier_caffe.png b/docs/img/image_classifier_caffe.png
new file mode 100644
index 000000000..14148826a
Binary files /dev/null and b/docs/img/image_classifier_caffe.png differ
diff --git a/examples/caffe/README.md b/examples/caffe/README.md
index dac35d6ff..835eb79a4 100644
--- a/examples/caffe/README.md
+++ b/examples/caffe/README.md
@@ -69,7 +69,7 @@ After the setup, run through a sample end to end caffe classification example us
 4. **Inference** - Run a single image on the FPGA
 
    ```
-   python run.py --prototxt /opt/models/caffe/bvlc_googlenet/bvlc_googlenet_train_val.prototxt --caffemodel /opt/models/caffe/bvlc_googlenet/bvlc_googlenet.caffemodel --image ../deployment_modes/dog.jpg
+   python run.py --image ../deployment_modes/dog.jpg
    ```
 
 5. **Benchmark FPGA performance** - evaluate network throughput and/or latency in a streaming deployment scenario (FPGA only)
diff --git a/examples/caffe/REST/app.py b/examples/caffe/REST/app.py
index 263b8e8f5..2c0d1881f 100755
--- a/examples/caffe/REST/app.py
+++ b/examples/caffe/REST/app.py
@@ -7,13 +7,13 @@ import io
 
 app = flask.Flask(__name__)
 
-
+
 def LoadImage(prototxt,caffemodel,labels):
   import numpy as np
   import xdnn_io
   global net
   net = caffe.Net(prototxt,caffemodel,caffe.TEST)
-  return net
+  return net
 
 def InferImage(net,image,labels):
   import numpy as np
@@ -36,50 +36,57 @@ def InferImage(net,image,labels):
     pass
   Labels = xdnn_io.get_labels(labels)
   xdnn_io.printClassification(softmax,[image],Labels)
-  return xdnn_io.getClassification(softmax,[image],Labels)
+  return xdnn_io.getClassification(softmax,[image],Labels)
+
+#@app.route('/predict', methods=['GET','POST'])  # go through http://<server-ip>:5000/predict for the result
 
-@app.route("/predict", methods=["POST"])
+@app.route('/')  # go through http://<server-ip>:5000/ for the result
 def predict():
   data = {"success": False}
   global prototxt
   global model
   global synset_words
+  global image
 
-  if flask.request.method == "POST":
-    image = flask.request.files["image"]
-    response = InferImage(net, image, synset_words)
-    data["success"] = True
-    data["response"] = response
+  response = InferImage(net, image, synset_words)
+  data["success"] = True
+  data["response"] = response
 
   return flask.jsonify(data)
 
 if __name__ == "__main__":
   parser = argparse.ArgumentParser(description='pyXFDNN')
-  parser.add_argument('--caffemodel', default="/opt/models/caffe/bvlc_googlenet/bvlc_googlenet.caffemodel", help='path to caffemodel file eg: /opt/models/caffe/bvlc_googlenet/bvlc_googlenet.caffemodel')
-  parser.add_argument('--prototxt', default="xfdnn_auto_cut_deploy.prototxt", help='path to prototxt file eg: xfdnn_auto_cut_deploy.prototxt')
-  parser.add_argument('--synset_words', default="$HOME/CK-TOOLS/dataset-imagenet-ilsvrc2012-aux/synset_words.txt", help='path to synset_words eg: $HOME/CK-TOOLS/dataset-imagenet-ilsvrc2012-aux/synset_words.txt')
+  parser.add_argument('--caffemodel', default="/opt/ml-suite/share/quantize_results/deploy.caffemodel", help='path to caffe model file eg: /opt/ml-suite/share/quantize_results/deploy.caffemodel')
+  parser.add_argument('--prototxt', default="/opt/ml-suite/share/xfdnn_auto_cut_deploy.prototxt", help='path to prototxt file eg: /opt/ml-suite/share/xfdnn_auto_cut_deploy.prototxt')
+  parser.add_argument('--synset_words', default="/opt/ml-suite/examples/deployment_modes/synset_words.txt", help='path to synset_words eg: /opt/ml-suite/examples/deployment_modes/synset_words.txt')
   parser.add_argument('--port', default=5000)
-
+  parser.add_argument('--image', default="/opt/ml-suite/examples/deployment_modes/dog.jpg")
+
   args = vars(parser.parse_args())
 
   if args["caffemodel"]:
     model=args["caffemodel"]
-
+
   if args["prototxt"]:
     prototxt=args["prototxt"]
 
   if args["synset_words"]:
     synset_words=args["synset_words"]
-
+
+  if args["image"]:
+    image=args["image"]
+
   if args["port"]:
     port=args["port"]
   else:
-    port=9000
+    port=9000
 
   print("Loading FPGA with image...")
   net = LoadImage(prototxt, model, synset_words)
-
-  print("Starting Flask Server...")
-  app.run(port=port)
+  #print("inferring the image")
+  #InferImage(net,image,synset_words)
+
+  print("Starting Flask Server...")
+  app.run(port=port,host='0.0.0.0')
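+
+# Usage note (host and port are deployment-specific): once the server is up,
+# the JSON classification of the file passed via --image can be fetched with a
+# plain GET on the root route, e.g.  curl http://<server-ip>:5000/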
diff --git a/examples/caffe/REST/run.sh b/examples/caffe/REST/run.sh
index caec3f57f..3bdb66b46 100755
--- a/examples/caffe/REST/run.sh
+++ b/examples/caffe/REST/run.sh
@@ -14,6 +14,4 @@ PORT="5000"
 
 python ../run.py --prototxt ${MODEL_PROTOTXT} --caffemodel ${MODEL_WEIGHTS} --prepare
 
-python app.py --caffemodel ${MODEL_WEIGHTS} --prototxt xfdnn_auto_cut_deploy.prototxt --synset_words ${LABELS} --port ${PORT}
-
-
+python app.py
diff --git a/examples/caffe/caffe_run.py b/examples/caffe/caffe_run.py
new file mode 100644
index 000000000..bc153acc1
--- /dev/null
+++ b/examples/caffe/caffe_run.py
@@ -0,0 +1,39 @@
+from __future__ import print_function
+
+import os,sys,argparse
+import caffe
+import io
+import numpy as np
+import xdnn_io
+
+# Use this routine to classify a single image
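+# caffe.Classifier below also performs the standard ImageNet preprocessing:
+# the image is resized to image_dims=[256,256], the channel means (104,117,123)
+# are subtracted, pixel values are rescaled to 0-255 (raw_scale=255) and the
+# channels are reordered from RGB to BGR (channel_swap=[2,1,0]) before the
+# forward pass. The argsort slice [-1:-6:-1] then reports the top-5 predictions.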
default="/opt/ml-suite/examples/deployment_modes/dog.jpg") + args = vars(parser.parse_args()) + + if args["caffemodel"]: + model=args["caffemodel"] + if args["prototxt"]: + prototxt=args["prototxt"] + if args["synset_words"]: + synset_words=args["synset_words"] + if args["image"]: + image=args["image"] + + print("Loading FPGA with image and classify...") + Classify(prototxt, model, image, synset_words)