diff options
-rw-r--r--  nlp/scripts/api_squad_offline.py        |  7
-rw-r--r--  nlp/scripts/api_squad_online.py         |  3
-rw-r--r--  nlp/scripts/global_setting.py           |  6
-rw-r--r--  standalone/src/main/assembly/dockerfile | 20
-rwxr-xr-x  standalone/src/main/assembly/run.sh     |  5
5 files changed, 25 insertions(+), 16 deletions(-)
diff --git a/nlp/scripts/api_squad_offline.py b/nlp/scripts/api_squad_offline.py index 8860dfe..a54ab7f 100644 --- a/nlp/scripts/api_squad_offline.py +++ b/nlp/scripts/api_squad_offline.py @@ -38,7 +38,7 @@ from api_squad import validate_flags_or_throw from api_squad import read_squad_examples from global_setting import CUDA_VISIBLE_DEVICES -from global_setting import FLAGS_bert_config_file, FLAGS_vocab_file, FLAGS_init_checkpoint_squad, questions +from global_setting import FLAGS_bert_config_file, FLAGS_vocab_file, FLAGS_init_checkpoint_squad os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = str(CUDA_VISIBLE_DEVICES) @@ -62,7 +62,7 @@ def serving_input_fn(): def main(FLAGS_output_dir, FLAGS_init_checkpoint_squad, FLAGS_export_dir, FLAGS_predict_file=None, FLAGS_train_file=None, FLAGS_do_predict=False, FLAGS_do_train=False, FLAGS_train_batch_size=16, FLAGS_predict_batch_size=8, FLAGS_learning_rate=5e-5, FLAGS_num_train_epochs=3.0, - FLAGS_max_answer_length=100, FLAGS_max_query_length=64, FLAGS_version_2_with_negative=False): + FLAGS_max_answer_length=100, FLAGS_max_query_length=64, FLAGS_version_2_with_negative=False,questions=[]): tf.logging.set_verbosity(tf.logging.INFO) bert_config = modeling.BertConfig.from_json_file(FLAGS_bert_config_file) @@ -222,10 +222,11 @@ class AI2Flask: FLAGS_train_file = request.json['FLAGS_train_file'] FLAGS_export_dir = request.json['FLAGS_export_dir'] task_id = request.json['task_id'] + questions = request.json['questions'] task = p.submit(main, FLAGS_output_dir, FLAGS_init_checkpoint_squad, FLAGS_export_dir, FLAGS_predict_file, FLAGS_train_file, FLAGS_do_predict, FLAGS_do_train, FLAGS_train_batch_size, FLAGS_predict_batch_size, FLAGS_learning_rate, FLAGS_num_train_epochs, - FLAGS_max_answer_length, FLAGS_max_query_length, FLAGS_version_2_with_negative) + FLAGS_max_answer_length, FLAGS_max_query_length, FLAGS_version_2_with_negative,questions) threads_mapping[task_id] = task return 
jsonify({"message": "Task submitted successfully", "status": "0"}) diff --git a/nlp/scripts/api_squad_online.py b/nlp/scripts/api_squad_online.py index abe3d5f..eba2cd2 100644 --- a/nlp/scripts/api_squad_online.py +++ b/nlp/scripts/api_squad_online.py @@ -13,7 +13,7 @@ import sys from flask import Flask, abort, request, jsonify import os -from global_setting import questions, tokenizer_ch, CUDA_VISIBLE_DEVICES +from global_setting import tokenizer_ch, CUDA_VISIBLE_DEVICES from create_squad_features import get_squad_feature_result @@ -38,6 +38,7 @@ class AI2Flask: except: title = 'Not available' text_origin = request.json['text'] + questions = request.json['questions'] if len(text_origin) > 800: text = text_origin[:800] diff --git a/nlp/scripts/global_setting.py b/nlp/scripts/global_setting.py index 51dfec1..625c529 100644 --- a/nlp/scripts/global_setting.py +++ b/nlp/scripts/global_setting.py @@ -21,9 +21,9 @@ CUDA_VISIBLE_DEVICES = "2" questions = ['Communication Service Name', 'Max Number of UEs', 'Data Rate Downlink', 'Latency', 'Data Rate Uplink', 'Resource Sharing Level', 'Mobility', 'Area'] # Configuration file -FLAGS_bert_config_file = '/home/run/uncased_L-12_H-768_A-12/bert_config.json' -FLAGS_vocab_file = '/home/run/uncased_L-12_H-768_A-12/vocab.txt' -FLAGS_init_checkpoint_squad = '/home/run/uncased_L-12_H-768_A-12/bert_model.ckpt' +FLAGS_bert_config_file = '/home/uuihome/uui/uncased_L-12_H-768_A-12/bert_config.json' +FLAGS_vocab_file = '/home/uuihome/uui/uncased_L-12_H-768_A-12/vocab.txt' +FLAGS_init_checkpoint_squad = '/home/uuihome/uui/uncased_L-12_H-768_A-12/bert_model.ckpt' max_seq_length = 512 diff --git a/standalone/src/main/assembly/dockerfile b/standalone/src/main/assembly/dockerfile index 970eafd..4203e20 100644 --- a/standalone/src/main/assembly/dockerfile +++ b/standalone/src/main/assembly/dockerfile @@ -1,6 +1,12 @@ FROM tensorflow/serving:latest -WORKDIR /home/run/ +RUN mkdir -p /home/uuihome/uui + +WORKDIR /home/uuihome/uui + +RUN groupadd 
uui -g 1000 + +RUN useradd -u 1000 -d /home/uuihome/uui -g uui -s /bin/bash uui RUN DEBIAN_FRONTEND=noninteractive apt-get update RUN DEBIAN_FRONTEND=noninteractive apt-get install wget -y @@ -10,12 +16,12 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get install python3-pip python3-dev build RUN ln -s /usr/bin/python3 /usr/bin/python RUN ln -s /usr/bin/pip3 /usr/bin/pip -COPY requirements /home/run/requirements +COPY requirements /home/uuihome/uui/requirements RUN DEBIAN_FRONTEND=noninteractive pip install -r requirements +COPY scripts /home/uuihome/uui/scripts +COPY run.sh /home/uuihome/uui/run.sh -COPY scripts /home/run/scripts - -COPY run.sh /home/run/run.sh - -ENTRYPOINT /home/run/run.sh +RUN chown -R uui:uui /home/uuihome/uui +USER uui +ENTRYPOINT /home/uuihome/uui/run.sh
\ No newline at end of file diff --git a/standalone/src/main/assembly/run.sh b/standalone/src/main/assembly/run.sh index 6ec5f75..94b4afa 100755 --- a/standalone/src/main/assembly/run.sh +++ b/standalone/src/main/assembly/run.sh @@ -15,7 +15,7 @@ # limitations under the License. # -cd /home/run/ +cd /home/uuihome/uui/ wget https://github.com/google-research/bert/archive/master.zip wget https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip @@ -23,8 +23,9 @@ unzip master.zip unzip uncased_L-12_H-768_A-12.zip rm master.zip uncased_L-12_H-768_A-12.zip cp scripts/* bert-master/ -cd /home/run/bert-master/ +cd /home/uuihome/uui/bert-master/ +mkdir upload nohup python -u api_squad_online.py 33011 > online.log 2>&1 & nohup python -u api_squad_offline.py 33012 > offline.log 2>&1 & nohup python -u upload.py 33013 > upload.log 2>&1 &