From dd67db0dede71551c772caa685d3c12a1a3e57d2 Mon Sep 17 00:00:00 2001 From: zhaoyehua Date: Wed, 24 Mar 2021 16:25:33 +0800 Subject: feat:Adjust the directory and increase the image production process Issue-ID: USECASEUI-525 Change-Id: I7bcbf0b48778fd59946483b253f32dda217913c0 Signed-off-by: zhaoyehua --- assembly.xml | 65 -- assembly/dockerfile | 53 -- assembly/requirements | 29 - assembly/run.sh | 19 - initialize.sh | 16 - mvn-phase-script.sh | 86 --- nlp/assembly.xml | 65 ++ nlp/initialize.sh | 16 + nlp/mvn-phase-script.sh | 86 +++ nlp/pom.xml | 113 ++++ nlp/requirements.txt | 4 + nlp/scripts/api_squad.py | 1028 +++++++++++++++++++++++++++++ nlp/scripts/api_squad_offline.py | 264 ++++++++ nlp/scripts/api_squad_online.py | 81 +++ nlp/scripts/create_squad_features.py | 721 ++++++++++++++++++++ nlp/scripts/global_setting.py | 31 + nlp/scripts/load_model.sh | 6 + nlp/test_1.py | 24 + nlp/tox.ini | 26 + pom.xml | 130 ++-- requirements.txt | 4 - scripts/api_squad.py | 1028 ----------------------------- scripts/api_squad_offline.py | 264 -------- scripts/api_squad_online.py | 81 --- scripts/create_squad_features.py | 721 -------------------- scripts/global_setting.py | 31 - scripts/load_model.sh | 6 - standalone/pom.xml | 173 +++++ standalone/src/main/assembly/LICENSE | 473 +++++++++++++ standalone/src/main/assembly/dockerfile | 21 + standalone/src/main/assembly/requirements | 29 + standalone/src/main/assembly/run.sh | 29 + test_1.py | 24 - tox.ini | 26 - 34 files changed, 3230 insertions(+), 2543 deletions(-) delete mode 100644 assembly.xml delete mode 100644 assembly/dockerfile delete mode 100644 assembly/requirements delete mode 100755 assembly/run.sh delete mode 100755 initialize.sh delete mode 100755 mvn-phase-script.sh create mode 100644 nlp/assembly.xml create mode 100755 nlp/initialize.sh create mode 100755 nlp/mvn-phase-script.sh create mode 100644 nlp/pom.xml create mode 100644 nlp/requirements.txt create mode 100644 nlp/scripts/api_squad.py create mode 100644 nlp/scripts/api_squad_offline.py create mode 100644 nlp/scripts/api_squad_online.py create mode 100644 nlp/scripts/create_squad_features.py create mode 100644 nlp/scripts/global_setting.py create mode 100755 nlp/scripts/load_model.sh create mode 100644 nlp/test_1.py create mode 100644 nlp/tox.ini delete mode 100644 requirements.txt delete mode 100644 scripts/api_squad.py delete mode 100644 scripts/api_squad_offline.py delete mode 100644 scripts/api_squad_online.py delete mode 100644 scripts/create_squad_features.py delete mode 100644 scripts/global_setting.py delete mode 100755 scripts/load_model.sh create mode 100644 standalone/pom.xml create mode 100644 standalone/src/main/assembly/LICENSE create mode 100644 standalone/src/main/assembly/dockerfile create mode 100644 standalone/src/main/assembly/requirements create mode 100755 standalone/src/main/assembly/run.sh delete mode 100644 test_1.py delete mode 100644 tox.ini diff --git a/assembly.xml b/assembly.xml deleted file mode 100644 index 011ac9e..0000000 --- a/assembly.xml +++ /dev/null @@ -1,65 +0,0 @@ - - - nlp - - zip - - - - scripts - /scripts - - **/*.py - **/*.json - **/*.xml - **/*.yml - **/*.sh - - - - resources - /resources - - **/*.sh - **/*.sql - - - - assembly - /assembly - - *.sh - dockerfile - requirements - - - - . 
- / - - *.py - *.txt - *.sh - *.ini - *.md - - - - usecase-ui/nlp - diff --git a/assembly/dockerfile b/assembly/dockerfile deleted file mode 100644 index b022d60..0000000 --- a/assembly/dockerfile +++ /dev/null @@ -1,53 +0,0 @@ -FROM tensorflow/serving:latest - -WORKDIR /home/run/ - -RUN DEBIAN_FRONTEND=noninteractive apt-get update - -#RUN DEBIAN_FRONTEND=noninteractive apt-get install python3.7.3 -RUN DEBIAN_FRONTEND=noninteractive apt-get install wget -y -RUN DEBIAN_FRONTEND=noninteractive apt-get install make -y -RUN DEBIAN_FRONTEND=noninteractive apt-get install gcc -y -RUN DEBIAN_FRONTEND=noninteractive apt-get install zlib* -y -RUN DEBIAN_FRONTEND=noninteractive apt-get install libffi-dev -y -RUN DEBIAN_FRONTEND=noninteractive apt-get install openssl -y -#RUN DEBIAN_FRONTEND=noninteractive wget https://www.python.org/ftp/python/3.7.3/Python-3.7.3.tgz - -WORKDIR /home/run/ -RUN wget http://www.openssl.org/source/openssl-1.0.2j.tar.gz -RUN tar -zxvf openssl-1.0.2j.tar.gz -WORKDIR /home/run/openssl-1.0.2j -RUN ./config --prefix=/usr/local/lab/openssl-1.0.2j shared no-zlib -RUN make -RUN make install -RUN ln -s /usr/local/lab/openssl-1.0.2j/lib/libssl.so.1.0.0 /usr/lib/libssl.so.1.0.0 -RUN ln -s /usr/local/lab/openssl-1.0.2j/lib/libcrypto.so.1.0.0 /usr/lib/libcrypto.so.1.0.0 - - -WORKDIR /home/run/ -COPY Python-3.7.3.tar.gz /home/run/ -RUN tar -zxvf Python-3.7.3.tar.gz -WORKDIR /home/run/Python-3.7.3/ -#RUN DEBIAN_FRONTEND=noninteractive ./configure --with-ssl -#RUN rm /home/run/Python-3.7.3/Modules/Setup -#COPY Setup /home/run/Python-3.7.3/Modules/Setup -RUN make -RUN make install -RUN ln -s /usr/local/bin/python3 /usr/bin/python -RUN ln -s /usr/local/bin/pip3 /usr/bin/pip - - -WORKDIR /home/run/ -RUN DEBIAN_FRONTEND=noninteractive apt-get install curl -y -RUN DEBIAN_FRONTEND=noninteractive curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py -RUN DEBIAN_FRONTEND=noninteractive python get-pip.py - -COPY requirements /home/run/requirements -RUN DEBIAN_FRONTEND=noninteractive pip install -r requirements - -ADD nlp.tar.gz /home/run/ - -COPY run.sh /home/run/run.sh - -ENTRYPOINT /home/run/run.sh - diff --git a/assembly/requirements b/assembly/requirements deleted file mode 100644 index a9cc63f..0000000 --- a/assembly/requirements +++ /dev/null @@ -1,29 +0,0 @@ -entrypoints==0.3 -Flask==1.1.1 -future==0.17.1 -joblib==0.13.2 -json5==0.8.4 -jsonschema==3.0.1 -Keras-Applications==1.0.8 -Keras-Preprocessing==1.1.0 -lxml==4.3.4 -matplotlib==3.1.0 -mkl-fft -mkl-random -mkl-service -numpy==1.16.4 -numpydoc==0.9.1 -openpyxl==2.6.2 -pandas==0.24.2 -protobuf==3.13.0 -requests==2.22.0 -scikit-learn==0.21.2 -setuptools==41.0.1 -six==1.12.0 -tensorboard==1.14.0 -tensorflow==1.14.0 -tensorflow-estimator==1.14.0 -urllib3==1.24.2 -xlrd==1.2.0 -XlsxWriter==1.1.8 -xlwt==1.3.0 diff --git a/assembly/run.sh b/assembly/run.sh deleted file mode 100755 index 132bfb6..0000000 --- a/assembly/run.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# -# Copyright 2016-2017 ZTE Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -sh start.sh 33011 -nohup python -u api_squad_online.py 33011 > online.log 2>&1 & -nohup python -u api_squad_offline.py 33012 > offline.log 2>&1 & diff --git a/initialize.sh b/initialize.sh deleted file mode 100755 index ba50ed0..0000000 --- a/initialize.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -# Copyright 2017 , Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -pip install -r requirements.txt diff --git a/mvn-phase-script.sh b/mvn-phase-script.sh deleted file mode 100755 index be7f566..0000000 --- a/mvn-phase-script.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/bash -# Copyright 2018 CTC , Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -set -e - -echo "running script: [$0] for module [$1] at stage [$2]" - -export SETTINGS_FILE=${SETTINGS_FILE:-$HOME/.m2/settings.xml} -MVN_PROJECT_MODULEID="$1" -MVN_PHASE="$2" - - -FQDN="${MVN_PROJECT_GROUPID}.${MVN_PROJECT_ARTIFACTID}" -if [ "$MVN_PROJECT_MODULEID" == "__" ]; then - MVN_PROJECT_MODULEID="" -fi - -if [ -z "$WORKSPACE" ]; then - WORKSPACE=$(pwd) -fi - - -# mvn phase in life cycle -MVN_PHASE="$2" - - -echo "MVN_PROJECT_MODULEID is [$MVN_PROJECT_MODULEID]" -echo "MVN_PHASE is [$MVN_PHASE]" -echo "MVN_PROJECT_GROUPID is [$MVN_PROJECT_GROUPID]" -echo "MVN_PROJECT_ARTIFACTID is [$MVN_PROJECT_ARTIFACTID]" -echo "MVN_PROJECT_VERSION is [$MVN_PROJECT_VERSION]" - -run_tox_test() -{ - set -x - CURDIR=$(pwd) - if [[ ${CURDIR} =~ "-sonar" ]] - then - echo "====Sonar job, need execute tox." - TOXINIS=$(find . -name "tox.ini") - for TOXINI in "${TOXINIS[@]}"; do - DIR=$(echo "$TOXINI" | rev | cut -f3- -d'/' | rev) - cd "${CURDIR}/${DIR}" - rm -rf ./venv-tox ./.tox - virtualenv ./venv-tox - source ./venv-tox/bin/activate - pip install --upgrade pip - pip install --upgrade tox argparse - pip freeze - tox - deactivate - rm -rf ./venv-tox ./.tox - done - else - echo "====Not a sonar job, need not execute tox." 
- fi -} - - -case $MVN_PHASE in -clean) - echo "==> clean phase script" - rm -rf ./venv-* - ;; -test) - echo "==> test phase script" - run_tox_test - ;; -*) - echo "==> unprocessed phase" - ;; -esac - diff --git a/nlp/assembly.xml b/nlp/assembly.xml new file mode 100644 index 0000000..011ac9e --- /dev/null +++ b/nlp/assembly.xml @@ -0,0 +1,65 @@ + + + nlp + + zip + + + + scripts + /scripts + + **/*.py + **/*.json + **/*.xml + **/*.yml + **/*.sh + + + + resources + /resources + + **/*.sh + **/*.sql + + + + assembly + /assembly + + *.sh + dockerfile + requirements + + + + . + / + + *.py + *.txt + *.sh + *.ini + *.md + + + + usecase-ui/nlp + diff --git a/nlp/initialize.sh b/nlp/initialize.sh new file mode 100755 index 0000000..ba50ed0 --- /dev/null +++ b/nlp/initialize.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# Copyright 2017 , Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +pip install -r requirements.txt diff --git a/nlp/mvn-phase-script.sh b/nlp/mvn-phase-script.sh new file mode 100755 index 0000000..be7f566 --- /dev/null +++ b/nlp/mvn-phase-script.sh @@ -0,0 +1,86 @@ +#!/bin/bash +# Copyright 2018 CTC , Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -e + +echo "running script: [$0] for module [$1] at stage [$2]" + +export SETTINGS_FILE=${SETTINGS_FILE:-$HOME/.m2/settings.xml} +MVN_PROJECT_MODULEID="$1" +MVN_PHASE="$2" + + +FQDN="${MVN_PROJECT_GROUPID}.${MVN_PROJECT_ARTIFACTID}" +if [ "$MVN_PROJECT_MODULEID" == "__" ]; then + MVN_PROJECT_MODULEID="" +fi + +if [ -z "$WORKSPACE" ]; then + WORKSPACE=$(pwd) +fi + + +# mvn phase in life cycle +MVN_PHASE="$2" + + +echo "MVN_PROJECT_MODULEID is [$MVN_PROJECT_MODULEID]" +echo "MVN_PHASE is [$MVN_PHASE]" +echo "MVN_PROJECT_GROUPID is [$MVN_PROJECT_GROUPID]" +echo "MVN_PROJECT_ARTIFACTID is [$MVN_PROJECT_ARTIFACTID]" +echo "MVN_PROJECT_VERSION is [$MVN_PROJECT_VERSION]" + +run_tox_test() +{ + set -x + CURDIR=$(pwd) + if [[ ${CURDIR} =~ "-sonar" ]] + then + echo "====Sonar job, need execute tox." + TOXINIS=$(find . -name "tox.ini") + for TOXINI in "${TOXINIS[@]}"; do + DIR=$(echo "$TOXINI" | rev | cut -f3- -d'/' | rev) + cd "${CURDIR}/${DIR}" + rm -rf ./venv-tox ./.tox + virtualenv ./venv-tox + source ./venv-tox/bin/activate + pip install --upgrade pip + pip install --upgrade tox argparse + pip freeze + tox + deactivate + rm -rf ./venv-tox ./.tox + done + else + echo "====Not a sonar job, need not execute tox." 
+ fi +} + + +case $MVN_PHASE in +clean) + echo "==> clean phase script" + rm -rf ./venv-* + ;; +test) + echo "==> test phase script" + run_tox_test + ;; +*) + echo "==> unprocessed phase" + ;; +esac + diff --git a/nlp/pom.xml b/nlp/pom.xml new file mode 100644 index 0000000..68f4b45 --- /dev/null +++ b/nlp/pom.xml @@ -0,0 +1,113 @@ + + + + + org.onap.usecase-ui.nlp + usecase-ui-nlp-parent + 1.0.0-SNAPSHOT + + org.onap.usecase-ui + usecase-ui-nlp + 1.0.0-SNAPSHOT + usecase-ui-nlp + pom + usecase-ui nlp + 4.0.0 + + UTF-8 + . + xunit-results.xml + coverage.xml + py + python + **/**.py + **/tests/**.py,**/test*.py + + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + ${project.basedir}/mvn-phase-script.sh + + + ${project.groupId} + ${project.artifactId} + ${project.version} + + + + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + clean phase script + clean + + exec + + + + __ + clean + + + + + test script + test + + exec + + + + __ + test + + + + + + + maven-assembly-plugin + + false + + assembly.xml + + + + + make-assembly + package + + single + + + + + + + diff --git a/nlp/requirements.txt b/nlp/requirements.txt new file mode 100644 index 0000000..024d1b4 --- /dev/null +++ b/nlp/requirements.txt @@ -0,0 +1,4 @@ +pytest +entrypoints==0.3 +Flask==1.1.1 +coverage diff --git a/nlp/scripts/api_squad.py b/nlp/scripts/api_squad.py new file mode 100644 index 0000000..f29a74b --- /dev/null +++ b/nlp/scripts/api_squad.py @@ -0,0 +1,1028 @@ +# coding=utf-8 +# squad interface +# Required parameters +# FLAGS_output_dir :the output path of the model training during training process, the output of the trained model, etc.; the output path of the model prediction during predicting process +# FLAGS_init_checkpoint_squad : model initialization path, use bert pre-trained model for training; use the output path during training for prediction +# FLAGS_predict_file : the file to be predicted, csv file +# FLAGS_train_file : file to be trained, csv file +# FLAGS_do_predict : whether to predict or not +# FLAGS_do_train : whether to train or not +# FLAGS_train_batch_size : the batch_size for training, default : 16 +# FLAGS_predict_batch_size : the batch_size when predicting, default: 8 +# FLAGS_learning_rate : the learning_rate at training time, default: 5e-5 +# FLAGS_num_train_epochs : epochs at training time, default: 3 +# FLAGS_max_answer_length : the maximum length of the answer, default: 100 characters +# FLAGS_max_query_length : the maximum length of the question, default: 64 +# FLAGS_version_2_with_negative : whether there is no answer to the question, default false, must be set to False when reasoning + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import json +import math +import modeling +import optimization +import tokenization +import six +import tensorflow as tf +import pandas as pd +from global_setting import FLAGS_init_checkpoint_squad + +FLAGS_max_seq_length = 512 +FLAGS_do_lower_case = True +FLAGS_doc_stride = 128 + + +FLAGS_save_checkpoints_steps = 1000 +FLAGS_iterations_per_loop = 1000 +FLAGS_n_best_size = 20 +FLAGS_tpu_zone = None +FLAGS_tpu_name = None +FLAGS_num_tpu_cores = 8 +FLAGS_verbose_logging = False +FLAGS_master = None +FLAGS_use_tpu = False +FLAGS_warmup_proportion = 0.1 +FLAGS_gcp_project = None +FLAGS_null_score_diff_threshold = 0.0 + + +def make_json(input_file, questions): + print(input_file) + data_train = pd.read_excel(input_file) + print(444) + data_train.fillna(0, 
inplace=True) + data_train.index = [i for i in range(len(data_train))] + question = questions + res = {} + res['data'] = [] + data_inside = {} + for i in data_train.index: + data_inside['title'] = 'Not available' + data_inside['paragraphs'] = [] + paragraphs_inside = {} + paragraphs_inside['context'] = data_train.loc[i, 'text'] + paragraphs_inside['qas'] = [] + for ques in question: + qas_inside = {} + qas_inside['answers'] = [] + if data_train.loc[i, ques]: + answer_inside = {} + answer_inside['text'] = str(data_train.loc[i, ques]) + answer_inside['answer_start'] = paragraphs_inside['context'].find(answer_inside['text']) + qas_inside['is_impossible'] = 0 + else: + qas_inside['is_impossible'] = 1 + answer_inside = {} + qas_inside['id'] = str(i) + ques + qas_inside['question'] = ques + qas_inside['answers'].append(answer_inside.copy()) + paragraphs_inside['qas'].append(qas_inside.copy()) + data_inside['paragraphs'].append(paragraphs_inside.copy()) + + res['data'].append(data_inside.copy()) + print('make json done') + return json.dumps(res) + + +class SquadExample(object): + """A single training/test example for simple sequence classification. + + For examples without an answer, the start and end position are -1. + """ + + def __init__(self, + qas_id, + question_text, + doc_tokens, + orig_answer_text=None, + start_position=None, + end_position=None, + is_impossible=False): + self.qas_id = qas_id + self.question_text = question_text + self.doc_tokens = doc_tokens + self.orig_answer_text = orig_answer_text + self.start_position = start_position + self.end_position = end_position + self.is_impossible = is_impossible + + def __str__(self): + return self.__repr__() + + def __repr__(self): + s = "" + s += "qas_id: %s" % (tokenization.printable_text(self.qas_id)) + s += ", question_text: %s" % ( + tokenization.printable_text(self.question_text)) + s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) + if self.start_position: + s += ", start_position: %d" % (self.start_position) + if self.start_position: + s += ", end_position: %d" % (self.end_position) + if self.start_position: + s += ", is_impossible: %r" % (self.is_impossible) + return s + + +class InputFeatures(object): + """A single set of features of data.""" + + def __init__(self, + unique_id, + example_index, + doc_span_index, + tokens, + token_to_orig_map, + token_is_max_context, + input_ids, + input_mask, + segment_ids, + start_position=None, + end_position=None, + is_impossible=None): + self.unique_id = unique_id + self.example_index = example_index + self.doc_span_index = doc_span_index + self.tokens = tokens + self.token_to_orig_map = token_to_orig_map + self.token_is_max_context = token_is_max_context + self.input_ids = input_ids + self.input_mask = input_mask + self.segment_ids = segment_ids + self.start_position = start_position + self.end_position = end_position + self.is_impossible = is_impossible + + +def read_squad_examples(input_file, is_training, questions, FLAGS_version_2_with_negative): + """Read a SQuAD json file into a list of SquadExample.""" + data = make_json(input_file, questions) + input_data = json.loads(data)["data"] + + def is_whitespace(c): + if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: + return True + return False + + examples = [] + for entry in input_data: + for paragraph in entry["paragraphs"]: + paragraph_text = paragraph["context"] + doc_tokens = [] + char_to_word_offset = [] + prev_is_whitespace = True + for c in paragraph_text: + if is_whitespace(c): + prev_is_whitespace = True 
+ else: + if prev_is_whitespace: + doc_tokens.append(c) + else: + doc_tokens[-1] += c + prev_is_whitespace = False + char_to_word_offset.append(len(doc_tokens) - 1) + + for qa in paragraph["qas"]: + qas_id = qa["id"] + question_text = qa["question"] + start_position = None + end_position = None + orig_answer_text = None + is_impossible = False + if is_training: + + if FLAGS_version_2_with_negative: + is_impossible = qa["is_impossible"] + if (len(qa["answers"]) != 1) and (not is_impossible): + raise ValueError( + "For training, each question should have exactly 1 answer.") + if not is_impossible: + answer = qa["answers"][0] + orig_answer_text = answer["text"] + answer_offset = answer["answer_start"] + answer_length = len(orig_answer_text) + start_position = char_to_word_offset[answer_offset] + end_position = char_to_word_offset[answer_offset + answer_length - 1] + # Only add answers where the text can be exactly recovered from the + # document. If this CAN'T happen it's likely due to weird Unicode + # stuff so we will just skip the example. + # + # Note that this means for training mode, every example is NOT + # guaranteed to be preserved. + actual_text = " ".join( + doc_tokens[start_position:(end_position + 1)]) + cleaned_answer_text = " ".join( + tokenization.whitespace_tokenize(orig_answer_text)) + if actual_text.find(cleaned_answer_text) == -1: + tf.logging.warning("Could not find answer: '%s' vs. '%s'", + actual_text, cleaned_answer_text) + continue + else: + start_position = -1 + end_position = -1 + orig_answer_text = "" + + example = SquadExample( + qas_id=qas_id, + question_text=question_text, + doc_tokens=doc_tokens, + orig_answer_text=orig_answer_text, + start_position=start_position, + end_position=end_position, + is_impossible=is_impossible) + examples.append(example) + + return examples + + +def convert_examples_to_features(examples, tokenizer, max_seq_length, + doc_stride, max_query_length, is_training, + output_fn): + """Loads a data file into a list of `InputBatch`s.""" + + unique_id = 1000000000 + + for (example_index, example) in enumerate(examples): + query_tokens = tokenizer.tokenize(example.question_text) + + if len(query_tokens) > max_query_length: + query_tokens = query_tokens[0:max_query_length] + + tok_to_orig_index = [] + orig_to_tok_index = [] + all_doc_tokens = [] + for (i, token) in enumerate(example.doc_tokens): + orig_to_tok_index.append(len(all_doc_tokens)) + sub_tokens = tokenizer.tokenize(token) + for sub_token in sub_tokens: + tok_to_orig_index.append(i) + all_doc_tokens.append(sub_token) + + tok_start_position = None + tok_end_position = None + if is_training and example.is_impossible: + tok_start_position = -1 + tok_end_position = -1 + if is_training and not example.is_impossible: + tok_start_position = orig_to_tok_index[example.start_position] + if example.end_position < len(example.doc_tokens) - 1: + tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 + else: + tok_end_position = len(all_doc_tokens) - 1 + (tok_start_position, tok_end_position) = _improve_answer_span( + all_doc_tokens, tok_start_position, tok_end_position, tokenizer, + example.orig_answer_text) + + # The -3 accounts for [CLS], [SEP] and [SEP] + max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 + + # We can have documents that are longer than the maximum sequence length. + # To deal with this we do a sliding window approach, where we take chunks + # of the up to our max length with a stride of `doc_stride`. 
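# A minimal standalone sketch (illustrative names and numbers, not from this
# patch) of the sliding-window arithmetic implemented by the loop below:

import collections

DocSpanSketch = collections.namedtuple("DocSpanSketch", ["start", "length"])

def make_doc_spans(num_tokens, max_tokens_for_doc, doc_stride):
    # Chunk a long document into overlapping windows: each span holds at most
    # max_tokens_for_doc tokens, and consecutive spans start doc_stride apart.
    spans, start_offset = [], 0
    while start_offset < num_tokens:
        length = min(num_tokens - start_offset, max_tokens_for_doc)
        spans.append(DocSpanSketch(start=start_offset, length=length))
        if start_offset + length == num_tokens:
            break
        start_offset += min(length, doc_stride)
    return spans

# e.g. a 600-token document with 509-token windows (512 - 3 special tokens,
# ignoring the query for brevity) and the default stride of 128 gives:
# make_doc_spans(600, 509, 128)
# -> [DocSpanSketch(start=0, length=509), DocSpanSketch(start=128, length=472)]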
+ _DocSpan = collections.namedtuple( # pylint: disable=invalid-name + "DocSpan", ["start", "length"]) + doc_spans = [] + start_offset = 0 + while start_offset < len(all_doc_tokens): + length = len(all_doc_tokens) - start_offset + if length > max_tokens_for_doc: + length = max_tokens_for_doc + doc_spans.append(_DocSpan(start=start_offset, length=length)) + if start_offset + length == len(all_doc_tokens): + break + start_offset += min(length, doc_stride) + + for (doc_span_index, doc_span) in enumerate(doc_spans): + tokens = [] + token_to_orig_map = {} + token_is_max_context = {} + segment_ids = [] + tokens.append("[CLS]") + segment_ids.append(0) + for token in query_tokens: + tokens.append(token) + segment_ids.append(0) + tokens.append("[SEP]") + segment_ids.append(0) + + for i in range(doc_span.length): + split_token_index = doc_span.start + i + token_to_orig_map[len( + tokens)] = tok_to_orig_index[split_token_index] + + is_max_context = _check_is_max_context(doc_spans, doc_span_index, + split_token_index) + token_is_max_context[len(tokens)] = is_max_context + tokens.append(all_doc_tokens[split_token_index]) + segment_ids.append(1) + tokens.append("[SEP]") + segment_ids.append(1) + + input_ids = tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real + # tokens are attended to. + input_mask = [1] * len(input_ids) + + # Zero-pad up to the sequence length. + while len(input_ids) < max_seq_length: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + + start_position = None + end_position = None + if is_training and not example.is_impossible: + # For training, if our document chunk does not contain an annotation + # we throw it out, since there is nothing to predict. 
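# A hedged sketch (illustrative, not from this patch) of the position
# bookkeeping that follows: an annotated answer falling outside the current
# window collapses to (0, 0); otherwise its token positions are shifted past
# the leading "[CLS] <query tokens> [SEP]".

def map_answer_into_span(tok_start, tok_end, span_start, span_length, num_query_tokens):
    span_end = span_start + span_length - 1
    if not (tok_start >= span_start and tok_end <= span_end):
        return (0, 0)  # annotation lies outside this chunk
    doc_offset = num_query_tokens + 2  # [CLS] + query tokens + [SEP]
    return (tok_start - span_start + doc_offset,
            tok_end - span_start + doc_offset)

# e.g. answer tokens 140..145 inside a window starting at 128 (length 472),
# with an 8-token question: map_answer_into_span(140, 145, 128, 472, 8) == (22, 27)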
+                doc_start = doc_span.start
+                doc_end = doc_span.start + doc_span.length - 1
+                out_of_span = False
+                if not (tok_start_position >= doc_start and tok_end_position <= doc_end):
+                    out_of_span = True
+                if out_of_span:
+                    start_position = 0
+                    end_position = 0
+                else:
+                    doc_offset = len(query_tokens) + 2
+                    start_position = tok_start_position - doc_start + doc_offset
+                    end_position = tok_end_position - doc_start + doc_offset
+
+            if is_training and example.is_impossible:
+                start_position = 0
+                end_position = 0
+
+            if example_index < 20:
+                tf.logging.info("*** Example ***")
+                tf.logging.info("unique_id: %s" % (unique_id))
+                tf.logging.info("example_index: %s" % (example_index))
+                tf.logging.info("doc_span_index: %s" % (doc_span_index))
+                tf.logging.info("tokens: %s" % " ".join(
+                    [tokenization.printable_text(x) for x in tokens]))
+                tf.logging.info("token_to_orig_map: %s" % " ".join(
+                    ["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))
+                tf.logging.info("token_is_max_context: %s" % " ".join([
+                    "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
+                ]))
+                tf.logging.info("input_ids: %s" %
+                                " ".join([str(x) for x in input_ids]))
+                tf.logging.info(
+                    "input_mask: %s" % " ".join([str(x) for x in input_mask]))
+                tf.logging.info(
+                    "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
+                if is_training and example.is_impossible:
+                    tf.logging.info("impossible example")
+                if is_training and not example.is_impossible:
+                    answer_text = " ".join(
+                        tokens[start_position:(end_position + 1)])
+                    tf.logging.info("start_position: %d" % (start_position))
+                    tf.logging.info("end_position: %d" % (end_position))
+                    tf.logging.info(
+                        "answer: %s" % (tokenization.printable_text(answer_text)))
+
+            feature = InputFeatures(
+                unique_id=unique_id,
+                example_index=example_index,
+                doc_span_index=doc_span_index,
+                tokens=tokens,
+                token_to_orig_map=token_to_orig_map,
+                token_is_max_context=token_is_max_context,
+                input_ids=input_ids,
+                input_mask=input_mask,
+                segment_ids=segment_ids,
+                start_position=start_position,
+                end_position=end_position,
+                is_impossible=example.is_impossible)
+
+            # Run callback
+            output_fn(feature)
+
+            unique_id += 1
+
+
+def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
+                         orig_answer_text):
+    """Returns tokenized answer spans that better match the annotated answer."""
+
+    # The SQuAD annotations are character based. We first project them to
+    # whitespace-tokenized words. But then after WordPiece tokenization, we can
+    # often find a "better match". For example:
+    #
+    # Question: What year was John Smith born?
+    # Context: The leader was John Smith (1895-1943).
+    # Answer: 1895
+    #
+    # The original whitespace-tokenized answer will be "(1895-1943).". However
+    # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
+    # the exact answer, 1895.
+    #
+    # However, this is not always possible. Consider the following:
+    #
+    # Question: What country is the top exporter of electronics?
+    # Context: The Japanese electronics industry is the largest in the world.
+    # Answer: Japan
+    #
+    # In this case, the annotator chose "Japan" as a character sub-span of
+    # the word "Japanese". Since our WordPiece tokenizer does not split
+    # "Japanese", we just use "Japanese" as the annotation. This is fairly rare
+    # in SQuAD, but does happen.
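    # Condensed into a self-contained example (illustrative, mirroring the
    # "(1895-1943)" case above), the search below is:
    #
    #   def improve_span(doc_tokens, input_start, input_end, tok_answer_text):
    #       for new_start in range(input_start, input_end + 1):
    #           for new_end in range(input_end, new_start - 1, -1):
    #               if " ".join(doc_tokens[new_start:new_end + 1]) == tok_answer_text:
    #                   return (new_start, new_end)
    #       return (input_start, input_end)
    #
    #   improve_span(["(", "1895", "-", "1943", ")", "."], 0, 5, "1895") == (1, 1)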
+ tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) + + for new_start in range(input_start, input_end + 1): + for new_end in range(input_end, new_start - 1, -1): + text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) + if text_span == tok_answer_text: + return (new_start, new_end) + + return (input_start, input_end) + + +def _check_is_max_context(doc_spans, cur_span_index, position): + """Check if this is the 'max context' doc span for the token.""" + + # Because of the sliding window approach taken to scoring documents, a single + # token can appear in multiple documents. E.g. + # Doc: the man went to the store and bought a gallon of milk + # Span A: the man went to the + # Span B: to the store and bought + # Span C: and bought a gallon of + # ... + # + # Now the word 'bought' will have two scores from spans B and C. We only + # want to consider the score with "maximum context", which we define as + # the *minimum* of its left and right context (the *sum* of left and + # right context will always be the same, of course). + # + # In the example the maximum context for 'bought' would be span C since + # it has 1 left context and 3 right context, while span B has 4 left context + # and 0 right context. + best_score = None + best_span_index = None + for (span_index, doc_span) in enumerate(doc_spans): + end = doc_span.start + doc_span.length - 1 + if position < doc_span.start: + continue + if position > end: + continue + num_left_context = position - doc_span.start + num_right_context = end - position + score = min(num_left_context, num_right_context) + \ + 0.01 * doc_span.length + if best_score is None or score > best_score: + best_score = score + best_span_index = span_index + + return cur_span_index == best_span_index + + +def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, + use_one_hot_embeddings): + """Creates a classification model.""" + model = modeling.BertModel( + config=bert_config, + is_training=is_training, + input_ids=input_ids, + input_mask=input_mask, + token_type_ids=segment_ids, + use_one_hot_embeddings=use_one_hot_embeddings) + + final_hidden = model.get_sequence_output() + + final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3) + batch_size = final_hidden_shape[0] + seq_length = final_hidden_shape[1] + hidden_size = final_hidden_shape[2] + + output_weights = tf.get_variable( + "cls/squad/output_weights", [2, hidden_size], + initializer=tf.truncated_normal_initializer(stddev=0.02)) + + output_bias = tf.get_variable( + "cls/squad/output_bias", [2], initializer=tf.zeros_initializer()) + + final_hidden_matrix = tf.reshape(final_hidden, + [batch_size * seq_length, hidden_size]) + logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True) + logits = tf.nn.bias_add(logits, output_bias) + + logits = tf.reshape(logits, [batch_size, seq_length, 2]) + logits = tf.transpose(logits, [2, 0, 1]) + + unstacked_logits = tf.unstack(logits, axis=0) + + (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1]) + + return (start_logits, end_logits) + + +def model_fn_builder(bert_config, init_checkpoint, learning_rate, + num_train_steps, num_warmup_steps, use_tpu, + use_one_hot_embeddings): + """Returns `model_fn` closure for TPUEstimator.""" + + def model_fn(features, labels, mode, params): # pylint: disable=unused-argument + """The `model_fn` for TPUEstimator.""" + + tf.logging.info("*** Features ***") + for name in sorted(features.keys()): + tf.logging.info(" name = %s, shape = %s" % + (name, 
features[name].shape)) + + input_ids = features["input_ids"] + input_mask = features["input_mask"] + segment_ids = features["segment_ids"] + + is_training = (mode == tf.estimator.ModeKeys.TRAIN) + + (start_logits, end_logits) = create_model( + bert_config=bert_config, + is_training=is_training, + input_ids=input_ids, + input_mask=input_mask, + segment_ids=segment_ids, + use_one_hot_embeddings=use_one_hot_embeddings) + + tvars = tf.trainable_variables() + + initialized_variable_names = {} + scaffold_fn = None + if init_checkpoint: + (assignment_map, initialized_variable_names + ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) + if use_tpu: + + def tpu_scaffold(): + tf.train.init_from_checkpoint( + init_checkpoint, assignment_map) + return tf.train.Scaffold() + + scaffold_fn = tpu_scaffold + else: + tf.train.init_from_checkpoint(init_checkpoint, assignment_map) + + tf.logging.info("**** Trainable Variables ****") + for var in tvars: + init_string = "" + if var.name in initialized_variable_names: + init_string = ", *INIT_FROM_CKPT*" + tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, + init_string) + + output_spec = None + if mode == tf.estimator.ModeKeys.TRAIN: + seq_length = modeling.get_shape_list(input_ids)[1] + + def compute_loss(logits, positions): + one_hot_positions = tf.one_hot( + positions, depth=seq_length, dtype=tf.float32) + log_probs = tf.nn.log_softmax(logits, axis=-1) + loss = -tf.reduce_mean( + tf.reduce_sum(one_hot_positions * log_probs, axis=-1)) + return loss + + start_positions = features["start_positions"] + end_positions = features["end_positions"] + + start_loss = compute_loss(start_logits, start_positions) + end_loss = compute_loss(end_logits, end_positions) + + total_loss = (start_loss + end_loss) / 2.0 + + train_op = optimization.create_optimizer( + total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) + + output_spec = tf.contrib.tpu.TPUEstimatorSpec( + mode=mode, + loss=total_loss, + train_op=train_op, + scaffold_fn=scaffold_fn) + elif mode == tf.estimator.ModeKeys.PREDICT: + predictions = { + # "unique_ids": unique_ids, + "start_logits": start_logits, + "end_logits": end_logits, + } + output_spec = tf.contrib.tpu.TPUEstimatorSpec( + mode=mode, predictions=predictions, scaffold_fn=scaffold_fn) + else: + raise ValueError( + "Only TRAIN and PREDICT modes are supported: %s" % (mode)) + + return output_spec + + return model_fn + + +def input_fn_builder(input_file, seq_length, is_training, drop_remainder): + """Creates an `input_fn` closure to be passed to TPUEstimator.""" + + name_to_features = { + "unique_ids": tf.FixedLenFeature([], tf.int64), + "input_ids": tf.FixedLenFeature([seq_length], tf.int64), + "input_mask": tf.FixedLenFeature([seq_length], tf.int64), + "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), + } + + if is_training: + name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64) + name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64) + + def _decode_record(record, name_to_features): + """Decodes a record to a TensorFlow example.""" + example = tf.parse_single_example(record, name_to_features) + + # tf.Example only supports tf.int64, but the TPU only supports tf.int32. + # So cast all int64 to int32. 
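        # Concretely (illustrative): after tf.parse_single_example, a feature
        # such as example["input_ids"] is an int64 Tensor of shape [seq_length];
        # the loop below narrows it via tf.to_int32 so that the resulting
        # dataset feeds TPU-compatible int32 batches.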
+ for name in list(example.keys()): + t = example[name] + if t.dtype == tf.int64: + t = tf.to_int32(t) + example[name] = t + + return example + + def input_fn(params): + """The actual input function.""" + batch_size = params["batch_size"] + + # For training, we want a lot of parallel reading and shuffling. + # For eval, we want no shuffling and parallel reading doesn't matter. + d = tf.data.TFRecordDataset(input_file) + if is_training: + d = d.repeat() + d = d.shuffle(buffer_size=100) + + d = d.apply( + tf.contrib.data.map_and_batch( + lambda record: _decode_record(record, name_to_features), + batch_size=batch_size, + drop_remainder=drop_remainder)) + + return d + + return input_fn + + +RawResult = collections.namedtuple("RawResult", + ["unique_id", "start_logits", "end_logits"]) + + +def write_predictions(all_examples, all_features, all_results, n_best_size, + max_answer_length, do_lower_case, output_prediction_file, + output_nbest_file, output_null_log_odds_file, FLAGS_version_2_with_negative): + """Write final predictions to the json file and log-odds of null if needed.""" + tf.logging.info("Writing predictions to: %s" % (output_prediction_file)) + tf.logging.info("Writing nbest to: %s" % (output_nbest_file)) + + example_index_to_features = collections.defaultdict(list) + for feature in all_features: + example_index_to_features[feature.example_index].append(feature) + + unique_id_to_result = {} + for result in all_results: + unique_id_to_result[result.unique_id] = result + + _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name + "PrelimPrediction", + ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) + + all_predictions = collections.OrderedDict() + all_nbest_json = collections.OrderedDict() + + for (example_index, example) in enumerate(all_examples): + features = example_index_to_features[example_index] + + prelim_predictions = [] + # keep track of the minimum score of null start+end of position 0 + score_null = 1000000 # large and positive + for (feature_index, feature) in enumerate(features): + result = unique_id_to_result[feature.unique_id] + start_indexes = _get_best_indexes(result.start_logits, n_best_size) + end_indexes = _get_best_indexes(result.end_logits, n_best_size) + # if we could have irrelevant answers, get the min score of irrelevant + if FLAGS_version_2_with_negative: + feature_null_score = result.start_logits[0] + \ + result.end_logits[0] + if feature_null_score < score_null: + score_null = feature_null_score + for start_index in start_indexes: + for end_index in end_indexes: + # We could hypothetically create invalid predictions, e.g., predict + # that the start of the span is in the question. We throw out all + # invalid predictions. 
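                    # Equivalently (an editorial condensation of the filters
                    # below), a (start_index, end_index) pair survives only if:
                    #   start_index < len(feature.tokens)
                    #   and end_index < len(feature.tokens)
                    #   and start_index in feature.token_to_orig_map
                    #   and end_index in feature.token_to_orig_map
                    #   and feature.token_is_max_context.get(start_index, False)
                    #   and start_index <= end_index
                    #   and end_index - start_index + 1 <= max_answer_length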
+ if start_index >= len(feature.tokens): + continue + if end_index >= len(feature.tokens): + continue + if start_index not in feature.token_to_orig_map: + continue + if end_index not in feature.token_to_orig_map: + continue + if not feature.token_is_max_context.get(start_index, False): + continue + if end_index < start_index: + continue + length = end_index - start_index + 1 + if length > max_answer_length: + continue + prelim_predictions.append( + _PrelimPrediction( + feature_index=feature_index, + start_index=start_index, + end_index=end_index, + start_logit=result.start_logits[start_index], + end_logit=result.end_logits[end_index])) + + prelim_predictions = sorted( + prelim_predictions, + key=lambda x: (x.start_logit + x.end_logit), + reverse=True) + + _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name + "NbestPrediction", ["text", "start_logit", "end_logit"]) + + seen_predictions = {} + nbest = [] + for pred in prelim_predictions: + if len(nbest) >= n_best_size: + break + feature = features[pred.feature_index] + if pred.start_index > 0: # this is a non-null prediction + tok_tokens = feature.tokens[pred.start_index:( + pred.end_index + 1)] + orig_doc_start = feature.token_to_orig_map[pred.start_index] + orig_doc_end = feature.token_to_orig_map[pred.end_index] + orig_tokens = example.doc_tokens[orig_doc_start:( + orig_doc_end + 1)] + tok_text = " ".join(tok_tokens) + + # De-tokenize WordPieces that have been split off. + tok_text = tok_text.replace(" ##", "") + tok_text = tok_text.replace("##", "") + + # Clean whitespace + tok_text = tok_text.strip() + tok_text = " ".join(tok_text.split()) + orig_text = " ".join(orig_tokens) + + final_text = get_final_text(tok_text, orig_text, do_lower_case) + if final_text in seen_predictions: + continue + + seen_predictions[final_text] = True + else: + final_text = "" + seen_predictions[final_text] = True + + nbest.append( + _NbestPrediction( + text=final_text, + start_logit=pred.start_logit, + end_logit=pred.end_logit)) + + # In very rare edge cases we could have no valid predictions. So we + # just create a nonce prediction in this case to avoid failure. + if not nbest: + nbest.append( + _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) + + assert len(nbest) >= 1 + + total_scores = [] + best_non_null_entry = None + for entry in nbest: + total_scores.append(entry.start_logit + entry.end_logit) + if not best_non_null_entry: + if entry.text: + best_non_null_entry = entry + + probs = _compute_softmax(total_scores) + + nbest_json = [] + for (i, entry) in enumerate(nbest): + output = collections.OrderedDict() + output["text"] = entry.text + output["probability"] = probs[i] + output["start_logit"] = entry.start_logit + output["end_logit"] = entry.end_logit + nbest_json.append(output) + + assert len(nbest_json) >= 1 + + all_predictions[example.qas_id] = nbest_json[0]["text"] + + all_nbest_json[example.qas_id] = nbest_json + + with tf.gfile.GFile(output_prediction_file, "w") as writer: + writer.write(json.dumps(all_predictions, indent=4) + "\n") + + +def get_final_text(pred_text, orig_text, do_lower_case): + """Project the tokenized prediction back to the original text.""" + + # When we created the data, we kept track of the alignment between original + # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So + # now `orig_text` contains the span of our original text corresponding to the + # span that we predicted. 
+    #
+    # However, `orig_text` may contain extra characters that we don't want in
+    # our prediction.
+    #
+    # For example, let's say:
+    #   pred_text = steve smith
+    #   orig_text = Steve Smith's
+    #
+    # We don't want to return `orig_text` because it contains the extra "'s".
+    #
+    # We don't want to return `pred_text` because it's already been normalized
+    # (the SQuAD eval script also does punctuation stripping/lower casing but
+    # our tokenizer does additional normalization like stripping accent
+    # characters).
+    #
+    # What we really want to return is "Steve Smith".
+    #
+    # Therefore, we have to apply a semi-complicated alignment heuristic between
+    # `pred_text` and `orig_text` to get a character-to-character alignment. This
+    # can fail in certain cases in which case we just return `orig_text`.
+
+    def _strip_spaces(text):
+        ns_chars = []
+        ns_to_s_map = collections.OrderedDict()
+        for (i, c) in enumerate(text):
+            if c == " ":
+                continue
+            ns_to_s_map[len(ns_chars)] = i
+            ns_chars.append(c)
+        ns_text = "".join(ns_chars)
+        return (ns_text, ns_to_s_map)
+
+    # We first tokenize `orig_text`, strip whitespace from the result
+    # and `pred_text`, and check if they are the same length. If they are
+    # NOT the same length, the heuristic has failed. If they are the same
+    # length, we assume the characters are one-to-one aligned.
+    tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
+
+    tok_text = " ".join(tokenizer.tokenize(orig_text))
+
+    start_position = tok_text.find(pred_text)
+    if start_position == -1:
+        if FLAGS_verbose_logging:
+            tf.logging.info(
+                "Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
+        return orig_text
+    end_position = start_position + len(pred_text) - 1
+
+    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
+    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
+
+    if len(orig_ns_text) != len(tok_ns_text):
+        if FLAGS_verbose_logging:
+            tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'",
+                            orig_ns_text, tok_ns_text)
+        return orig_text
+
+    # We then project the characters in `pred_text` back to `orig_text` using
+    # the character-to-character alignment.
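    # Worked example (hypothetical strings): _strip_spaces("a b c") above
    # returns ("abc", {0: 0, 1: 2, 2: 4}), i.e. a map from positions in the
    # space-free string back to positions in the original, so a match found
    # in the tokenized text can be projected onto a span of `orig_text`.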
+ tok_s_to_ns_map = {} + for (i, tok_index) in six.iteritems(tok_ns_to_s_map): + tok_s_to_ns_map[tok_index] = i + + orig_start_position = None + if start_position in tok_s_to_ns_map: + ns_start_position = tok_s_to_ns_map[start_position] + if ns_start_position in orig_ns_to_s_map: + orig_start_position = orig_ns_to_s_map[ns_start_position] + + if orig_start_position is None: + if FLAGS_verbose_logging: + tf.logging.info("Couldn't map start position") + return orig_text + + orig_end_position = None + if end_position in tok_s_to_ns_map: + ns_end_position = tok_s_to_ns_map[end_position] + if ns_end_position in orig_ns_to_s_map: + orig_end_position = orig_ns_to_s_map[ns_end_position] + + if orig_end_position is None: + if FLAGS_verbose_logging: + tf.logging.info("Couldn't map end position") + return orig_text + + output_text = orig_text[orig_start_position:(orig_end_position + 1)] + return output_text + + +def _get_best_indexes(logits, n_best_size): + """Get the n-best logits from a list.""" + index_and_score = sorted( + enumerate(logits), key=lambda x: x[1], reverse=True) + + best_indexes = [] + for i in range(len(index_and_score)): + if i >= n_best_size: + break + best_indexes.append(index_and_score[i][0]) + return best_indexes + + +def _compute_softmax(scores): + """Compute softmax probability over raw logits.""" + if not scores: + return [] + + max_score = None + for score in scores: + if max_score is None or score > max_score: + max_score = score + + exp_scores = [] + total_sum = 0.0 + for score in scores: + x = math.exp(score - max_score) + exp_scores.append(x) + total_sum += x + + probs = [] + for score in exp_scores: + probs.append(score / total_sum) + return probs + + +class FeatureWriter(object): + """Writes InputFeature to TF example file.""" + + def __init__(self, filename, is_training): + self.filename = filename + self.is_training = is_training + self.num_features = 0 + self._writer = tf.python_io.TFRecordWriter(filename) + + def process_feature(self, feature): + """Write a InputFeature to the TFRecordWriter as a tf.train.Example.""" + self.num_features += 1 + + def create_int_feature(values): + feature = tf.train.Feature( + int64_list=tf.train.Int64List(value=list(values))) + return feature + + features = collections.OrderedDict() + features["unique_ids"] = create_int_feature([feature.unique_id]) + features["input_ids"] = create_int_feature(feature.input_ids) + features["input_mask"] = create_int_feature(feature.input_mask) + features["segment_ids"] = create_int_feature(feature.segment_ids) + + if self.is_training: + features["start_positions"] = create_int_feature( + [feature.start_position]) + features["end_positions"] = create_int_feature( + [feature.end_position]) + impossible = 0 + if feature.is_impossible: + impossible = 1 + features["is_impossible"] = create_int_feature([impossible]) + + tf_example = tf.train.Example( + features=tf.train.Features(feature=features)) + self._writer.write(tf_example.SerializeToString()) + + def close(self): + self._writer.close() + + +def validate_flags_or_throw(bert_config): + """Validate the input FLAGS or throw an exception.""" + tokenization.validate_case_matches_checkpoint(FLAGS_do_lower_case, + FLAGS_init_checkpoint_squad) + + # if not FLAGS_do_train and not FLAGS_do_predict: + # raise ValueError( + # "At least one of `do_train` or `do_predict` must be True.") + + # if FLAGS_do_train: + # if not FLAGS_train_file: + # raise ValueError( + # "If `do_train` is True, then `train_file` must be specified.") + # if FLAGS_do_predict: + # if not 
FLAGS_predict_file: + # raise ValueError( + # "If `do_predict` is True, then `predict_file` must be specified.") + + # if FLAGS_max_seq_length > bert_config.max_position_embeddings: + # raise ValueError( + # "Cannot use sequence length %d because the BERT model " + # "was only trained up to sequence length %d" % + # (FLAGS_max_seq_length, bert_config.max_position_embeddings)) + + # if FLAGS_max_seq_length <= FLAGS_max_query_length + 3: + # raise ValueError( + # "The max_seq_length (%d) must be greater than max_query_length " + # "(%d) + 3" % (FLAGS_max_seq_length, FLAGS_max_query_length)) diff --git a/nlp/scripts/api_squad_offline.py b/nlp/scripts/api_squad_offline.py new file mode 100644 index 0000000..8a05141 --- /dev/null +++ b/nlp/scripts/api_squad_offline.py @@ -0,0 +1,264 @@ +#!/usr/bin/env python +# coding: utf-8 + +# auther = 'liuzhiyong' +# date = 20201204 + + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from flask import Flask, abort, request, jsonify +from concurrent.futures import ThreadPoolExecutor + +import os +import random +import modeling +import tokenization +import tensorflow as tf +import sys + +from api_squad import FLAGS_max_seq_length +from api_squad import FLAGS_do_lower_case +from api_squad import FLAGS_use_tpu +from api_squad import FLAGS_tpu_name +from api_squad import FLAGS_tpu_zone +from api_squad import FLAGS_gcp_project +from api_squad import FLAGS_master +from api_squad import FLAGS_save_checkpoints_steps +from api_squad import FLAGS_iterations_per_loop +from api_squad import FLAGS_num_tpu_cores +from api_squad import FLAGS_warmup_proportion +from api_squad import FLAGS_doc_stride +from api_squad import model_fn_builder +from api_squad import FeatureWriter +from api_squad import convert_examples_to_features +from api_squad import input_fn_builder + +from global_setting import CUDA_VISIBLE_DEVICES +from global_setting import validate_flags_or_throw +from global_setting import read_squad_examples +from global_setting import FLAGS_bert_config_file, FLAGS_vocab_file, FLAGS_init_checkpoint_squad, questions + +os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" +os.environ["CUDA_VISIBLE_DEVICES"] = str(CUDA_VISIBLE_DEVICES) + +app = Flask(__name__) + + +def serving_input_fn(): + input_ids = tf.placeholder(tf.int32, [None, FLAGS_max_seq_length], name='input_ids') + unique_id = tf.placeholder(tf.int32, [None]) + input_mask = tf.placeholder(tf.int32, [None, FLAGS_max_seq_length], name='input_mask') + segment_ids = tf.placeholder(tf.int32, [None, FLAGS_max_seq_length], name='segment_ids') + input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({ + 'input_ids': input_ids, + 'input_mask': input_mask, + 'segment_ids': segment_ids, + 'unique_ids': unique_id, + })() + return input_fn + + +def main(FLAGS_output_dir, FLAGS_init_checkpoint_squad, FLAGS_export_dir, FLAGS_predict_file=None, FLAGS_train_file=None, FLAGS_do_predict=False, + FLAGS_do_train=False, FLAGS_train_batch_size=16, FLAGS_predict_batch_size=8, FLAGS_learning_rate=5e-5, FLAGS_num_train_epochs=3.0, + FLAGS_max_answer_length=100, FLAGS_max_query_length=64, FLAGS_version_2_with_negative=False): + tf.logging.set_verbosity(tf.logging.INFO) + + bert_config = modeling.BertConfig.from_json_file(FLAGS_bert_config_file) + + validate_flags_or_throw(bert_config) + + tf.gfile.MakeDirs(FLAGS_output_dir) + + tokenizer = tokenization.FullTokenizer( + vocab_file=FLAGS_vocab_file, do_lower_case=FLAGS_do_lower_case) + + tpu_cluster_resolver = None + if 
FLAGS_use_tpu and FLAGS_tpu_name:
+        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
+            FLAGS_tpu_name, zone=FLAGS_tpu_zone, project=FLAGS_gcp_project)
+    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
+    run_config = tf.contrib.tpu.RunConfig(
+        cluster=tpu_cluster_resolver,
+        master=FLAGS_master,
+        model_dir=FLAGS_output_dir,
+        save_checkpoints_steps=FLAGS_save_checkpoints_steps,
+        tpu_config=tf.contrib.tpu.TPUConfig(
+            iterations_per_loop=FLAGS_iterations_per_loop,
+            num_shards=FLAGS_num_tpu_cores,
+            per_host_input_for_training=is_per_host))
+
+    train_examples = None
+    num_train_steps = None
+    num_warmup_steps = None
+
+    if FLAGS_do_train:
+        train_examples = read_squad_examples(
+            input_file=FLAGS_train_file, is_training=True, questions=questions, FLAGS_version_2_with_negative=FLAGS_version_2_with_negative)
+        num_train_steps = int(
+            len(train_examples) / FLAGS_train_batch_size * FLAGS_num_train_epochs)
+        num_warmup_steps = int(num_train_steps * FLAGS_warmup_proportion)
+
+        # Pre-shuffle the input to avoid having to make a very large shuffle
+        # buffer in the `input_fn`.
+        rng = random.Random(12345)
+        rng.shuffle(train_examples)
+
+    model_fn = model_fn_builder(
+        bert_config=bert_config,
+        init_checkpoint=FLAGS_init_checkpoint_squad,
+        learning_rate=FLAGS_learning_rate,
+        num_train_steps=num_train_steps,
+        num_warmup_steps=num_warmup_steps,
+        use_tpu=FLAGS_use_tpu,
+        use_one_hot_embeddings=FLAGS_use_tpu)
+
+    # If TPU is not available, this will fall back to normal Estimator on CPU
+    # or GPU.
+    estimator = tf.contrib.tpu.TPUEstimator(
+        use_tpu=FLAGS_use_tpu,
+        model_fn=model_fn,
+        config=run_config,
+        train_batch_size=FLAGS_train_batch_size,
+        predict_batch_size=FLAGS_predict_batch_size)
+
+    if FLAGS_do_train:
+        # We write to a temporary file to avoid storing very large constant tensors
+        # in memory.
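        # For orientation (illustrative; schema as written by FeatureWriter
        # above), each serialized record carries Int64List features:
        #   "unique_ids"      - [unique_id], starting at 1000000000
        #   "input_ids"       - token ids, zero-padded to FLAGS_max_seq_length
        #   "input_mask"      - 1 for real tokens, 0 for padding
        #   "segment_ids"     - 0 for "[CLS] query [SEP]", 1 for the document
        #   "start_positions" - [answer start index], training only
        #   "end_positions"   - [answer end index], training only
        #   "is_impossible"   - [0 or 1], training only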
+ train_writer = FeatureWriter( + filename=os.path.join(FLAGS_output_dir, "train.tf_record"), + is_training=True) + convert_examples_to_features( + examples=train_examples, + tokenizer=tokenizer, + max_seq_length=FLAGS_max_seq_length, + doc_stride=FLAGS_doc_stride, + max_query_length=FLAGS_max_query_length, + is_training=True, + output_fn=train_writer.process_feature) + train_writer.close() + + tf.logging.info("***** Running training *****") + tf.logging.info(" Num orig examples = %d", len(train_examples)) + tf.logging.info(" Num split examples = %d", train_writer.num_features) + tf.logging.info(" Batch size = %d", FLAGS_train_batch_size) + tf.logging.info(" Num steps = %d", num_train_steps) + del train_examples + + train_input_fn = input_fn_builder( + input_file=train_writer.filename, + seq_length=FLAGS_max_seq_length, + is_training=True, + drop_remainder=True) + estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) + estimator._export_to_tpu = False + estimator.export_savedmodel(FLAGS_export_dir, serving_input_fn) + return 'success' + + +class AI2Flask: + + def __init__(self, port=5000, workers=4): + self.app = app + self.port = port + p = ThreadPoolExecutor(max_workers=workers) + threads_mapping = {} + + def check_threads(): + flag = False + pop_keys = set() + if len(threads_mapping) >= workers: + for k, v in threads_mapping.items(): + if v.running(): + flag = True + else: + pop_keys.add(k) + + for k in pop_keys: + threads_mapping.pop(k) + + return flag + + @app.route('/api/offline/train', methods=['POST']) + def text_analyse(): + if not request.json or 'task_id' not in request.json: + abort(400) + if check_threads(): + return jsonify({"Des": "Task list is full. Can not submit new task! ", "Result": "Failed to submit the training task ", "Status": "ERROR"}) + + else: + try: + FLAGS_train_batch_size = request.json['FLAGS_train_batch_size'] + except: + FLAGS_train_batch_size = 16 + try: + FLAGS_learning_rate = request.json['FLAGS_learning_rate'] + except: + FLAGS_learning_rate = 5e-5 + try: + FLAGS_num_train_epochs = request.json['FLAGS_num_train_epochs'] + except: + FLAGS_num_train_epochs = 3.0 + try: + FLAGS_max_answer_length = request.json['FLAGS_max_answer_length'] + except: + FLAGS_max_answer_length = 100 + try: + FLAGS_max_query_length = request.json['FLAGS_max_query_length'] + except: + FLAGS_max_query_length = 64 + try: + FLAGS_version_2_with_negative = request.json['FLAGS_version_2_with_negative'] + except: + FLAGS_version_2_with_negative = True + + try: + FLAGS_predict_file = None + FLAGS_predict_batch_size = 8 + FLAGS_do_predict = False + FLAGS_do_train = True + FLAGS_output_dir = request.json['FLAGS_output_dir'] + FLAGS_train_file = request.json['FLAGS_train_file'] + FLAGS_export_dir = request.json['FLAGS_export_dir'] + task_id = request.json['task_id'] + + task = p.submit(main, FLAGS_output_dir, FLAGS_init_checkpoint_squad, FLAGS_export_dir, FLAGS_predict_file, FLAGS_train_file, FLAGS_do_predict, + FLAGS_do_train, FLAGS_train_batch_size, FLAGS_predict_batch_size, FLAGS_learning_rate, FLAGS_num_train_epochs, + FLAGS_max_answer_length, FLAGS_max_query_length, FLAGS_version_2_with_negative) + threads_mapping[task_id] = task + + return jsonify({"message": "Task submitted successfully", "status": "0"}) + + except KeyError as e: + return jsonify({"Des": 'KeyError: {}'.format(str(e)), "Result": 'None', "Status": "Error"}) + except Exception as e: + return jsonify({"Des": str(e), "Result": 'None', "Status": "Error"}) + + @app.route('/api/offline/status', 
methods=['POST']) + def todo_status(): + task_id = request.json['task_id'] + task = threads_mapping.get(task_id, None) + try: + if task is None: + return jsonify({'Des': 'The task was not found', 'Status': 'ERROR'}) + else: + if task.done(): + print(task.result) + if task.result() == 'success': + return jsonify({'Des': 'DONE', 'Status': 'OK'}) + else: + return jsonify({'Des': 'Program execution error. Please check the execution log ', 'Status': 'ERROR'}) + + else: + return jsonify({'Des': 'RUNNING', 'Status': 'OK'}) + except Exception as e: + return jsonify({'Des': str(e), 'Status': 'ERROR'}) + + def start(self): + self.app.run(host="0.0.0.0", port=self.port, threaded=True) + + +if __name__ == '__main__': + port = sys.argv[1] + AI2Flask(port=port).start() diff --git a/nlp/scripts/api_squad_online.py b/nlp/scripts/api_squad_online.py new file mode 100644 index 0000000..abe3d5f --- /dev/null +++ b/nlp/scripts/api_squad_online.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python +# coding: utf-8 + +# auther = 'liuzhiyong' +# date = 20201204 + + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import json +import sys +from flask import Flask, abort, request, jsonify + +import os +from global_setting import questions, tokenizer_ch, CUDA_VISIBLE_DEVICES +from create_squad_features import get_squad_feature_result + + +app = Flask(__name__) + + +class AI2Flask: + + def __init__(self, port=5000, workers=4): + self.app = app + self.port = port + + @app.route('/api/online/predict', methods=['POST']) + def text_analyse(): + if not request.json: + abort(400) + + else: + try: + try: + title = request.json['title'] + except: + title = 'Not available' + text_origin = request.json['text'] + + if len(text_origin) > 800: + text = text_origin[:800] + else: + text = text_origin + + result = {} + for ques in questions: + tmp = get_squad_feature_result(title=title, text=text, tokenizer=tokenizer_ch, question=[ques], url='http://localhost:8502/v1/models/predict:predict') + result[ques] = dict(tmp)[ques] + + print('finished!!') + return json.dumps(result) + + except KeyError as e: + return jsonify({"Des": 'KeyError: {}'.format(str(e)), "Result": 'None', "Status": "Error"}) + except Exception as e: + return jsonify({"Des": str(e), "Result": 'None', "Status": "Error"}) + + @app.route('/api/online/load', methods=['POST']) + def load_model(): + if not request.json: + abort(400) + else: + try: + path = request.json['path'] + flag = os.system('./load_model.sh ' + path + ' ' + CUDA_VISIBLE_DEVICES) + if flag == 0: + return jsonify({"Des": "Model loaded successfully !", "Status": "OK"}) + else: + return jsonify({"Des": "Model loaded failed , check the logs !", "Status": "Error"}) + except Exception as e: + return jsonify({"Des": str(e), "Status": "Error"}) + + def start(self): + self.app.run(host="0.0.0.0", port=self.port, threaded=True) + + +if __name__ == '__main__': + port = sys.argv[1] + AI2Flask(port=port).start() diff --git a/nlp/scripts/create_squad_features.py b/nlp/scripts/create_squad_features.py new file mode 100644 index 0000000..ce274e0 --- /dev/null +++ b/nlp/scripts/create_squad_features.py @@ -0,0 +1,721 @@ +#!/usr/bin/env python +# coding: utf-8 + +# auther = 'liuzhiyong' +# date = 20201204 + + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import json + +import collections +import math +import tokenization +import six +import tensorflow as tf +import requests + +from global_setting 
import _improve_answer_span + +version_2_with_negative = True + + +def get_squad_feature_result(title, text, tokenizer, question, url): + + def make_json(title, text, question): + res = {} + res['data'] = [] + data_inside = {} + + data_inside['title'] = title + data_inside['paragraphs'] = [] + paragraphs_inside = {} + paragraphs_inside['context'] = text + paragraphs_inside['qas'] = [] + for ques in question: + qas_inside = {} + qas_inside['answers'] = [] + + answer_inside = {} + + qas_inside['id'] = ques + qas_inside['question'] = ques + qas_inside['answers'].append(answer_inside.copy()) + paragraphs_inside['qas'].append(qas_inside.copy()) + data_inside['paragraphs'].append(paragraphs_inside.copy()) + + res['data'].append(data_inside.copy()) + return json.dumps(res) + + def _compute_softmax(scores): + """Compute softmax probability over raw logits.""" + if not scores: + return [] + + max_score = None + for score in scores: + if max_score is None or score > max_score: + max_score = score + + exp_scores = [] + total_sum = 0.0 + for score in scores: + x = math.exp(score - max_score) + exp_scores.append(x) + total_sum += x + + probs = [] + for score in exp_scores: + probs.append(score / total_sum) + return probs + + def get_final_text(pred_text, orig_text, do_lower_case): + + def _strip_spaces(text): + ns_chars = [] + ns_to_s_map = collections.OrderedDict() + for (i, c) in enumerate(text): + if c == " ": + continue + ns_to_s_map[len(ns_chars)] = i + ns_chars.append(c) + ns_text = "".join(ns_chars) + return (ns_text, ns_to_s_map) + + # We first tokenize `orig_text`, strip whitespace from the result + # and `pred_text`, and check if they are the same length. If they are + # NOT the same length, the heuristic has failed. If they are the same + # length, we assume the characters are one-to-one aligned. + tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case) + + tok_text = " ".join(tokenizer.tokenize(orig_text)) + + start_position = tok_text.find(pred_text) + if start_position == -1: + if 0: + tf.logging.info( + "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) + return orig_text + end_position = start_position + len(pred_text) - 1 + + (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) + (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) + + if len(orig_ns_text) != len(tok_ns_text): + if 0: + tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'", + orig_ns_text, tok_ns_text) + return orig_text + + # We then project the characters in `pred_text` back to `orig_text` using + # the character-to-character alignment. 
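# A minimal, self-contained sketch (not part of this patch) of the projection
# step coded just below: both texts are stripped of spaces, and because the
# stripped strings have the same length, positions can be mapped character by
# character from the tokenized text back to the original text.  The example
# strings are hypothetical tokenizer output.
import collections

def _strip_spaces(text):
    ns_chars = []
    ns_to_s_map = collections.OrderedDict()
    for (i, c) in enumerate(text):
        if c == " ":
            continue
        ns_to_s_map[len(ns_chars)] = i  # stripped index -> original index
        ns_chars.append(c)
    return ("".join(ns_chars), ns_to_s_map)

def project(pred_text, tok_text, orig_text):
    # Locate the prediction in the tokenized text, then hop through the two
    # space-stripped alignment maps to recover the span in the original text.
    start = tok_text.find(pred_text)
    end = start + len(pred_text) - 1
    (_, orig_map) = _strip_spaces(orig_text)
    (_, tok_map) = _strip_spaces(tok_text)
    tok_s_to_ns = {s: ns for (ns, s) in tok_map.items()}
    return orig_text[orig_map[tok_s_to_ns[start]]:orig_map[tok_s_to_ns[end]] + 1]

print(project("steve smith", "steve smith ' s", "Steve Smith's"))  # -> Steve Smith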
+ tok_s_to_ns_map = {} + for (i, tok_index) in six.iteritems(tok_ns_to_s_map): + tok_s_to_ns_map[tok_index] = i + + orig_start_position = None + if start_position in tok_s_to_ns_map: + ns_start_position = tok_s_to_ns_map[start_position] + if ns_start_position in orig_ns_to_s_map: + orig_start_position = orig_ns_to_s_map[ns_start_position] + + if orig_start_position is None: + if 0: + tf.logging.info("Couldn't map start position") + return orig_text + + orig_end_position = None + if end_position in tok_s_to_ns_map: + ns_end_position = tok_s_to_ns_map[end_position] + if ns_end_position in orig_ns_to_s_map: + orig_end_position = orig_ns_to_s_map[ns_end_position] + + if orig_end_position is None: + if 0: + tf.logging.info("Couldn't map end position") + return orig_text + + output_text = orig_text[orig_start_position:(orig_end_position + 1)] + return output_text + + def _get_best_indexes(logits, n_best_size): + + index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) + + best_indexes = [] + for i in range(len(index_and_score)): + if i >= n_best_size: + break + best_indexes.append(index_and_score[i][0]) + return best_indexes + + RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"]) + + def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case): + """Write final predictions to the json file and log-odds of null if needed.""" + + example_index_to_features = collections.defaultdict(list) + for feature in all_features: + example_index_to_features[feature.example_index].append(feature) + + unique_id_to_result = {} + for result in all_results: + unique_id_to_result[result.unique_id] = result + + _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name + "PrelimPrediction", + ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) + + all_predictions = collections.OrderedDict() + all_nbest_json = collections.OrderedDict() + scores_diff_json = collections.OrderedDict() + + for (example_index, example) in enumerate(all_examples): + features = example_index_to_features[example_index] + + prelim_predictions = [] + # keep track of the minimum score of null start+end of position 0 + score_null = 1000000 # large and positive + min_null_feature_index = 0 # the paragraph slice with min mull score + null_start_logit = 0 # the start logit at the slice with min null score + null_end_logit = 0 # the end logit at the slice with min null score + for (feature_index, feature) in enumerate(features): + result = unique_id_to_result[feature.unique_id] + start_indexes = _get_best_indexes(result.start_logits, n_best_size) + end_indexes = _get_best_indexes(result.end_logits, n_best_size) + # if we could have irrelevant answers, get the min score of irrelevant + if version_2_with_negative: + feature_null_score = result.start_logits[0] + result.end_logits[0] + if feature_null_score < score_null: + score_null = feature_null_score + min_null_feature_index = feature_index + null_start_logit = result.start_logits[0] + null_end_logit = result.end_logits[0] + + for start_index in start_indexes: + for end_index in end_indexes: + # We could hypothetically create invalid predictions, e.g., predict + # that the start of the span is in the question. We throw out all + # invalid predictions. 
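# Illustration only (not patch code): the validity rules applied to each
# candidate (start_index, end_index) pair below, collected into one predicate.
# `feature` stands for an InputFeatures-like object as defined later in this
# file.
def is_valid_span(feature, start_index, end_index, max_answer_length):
    """Mirrors the checks below: a span must stay inside the document part of
    the input, start at a max-context position, be well ordered, and not
    exceed max_answer_length tokens."""
    if start_index >= len(feature.tokens) or end_index >= len(feature.tokens):
        return False  # index points into the padding
    if (start_index not in feature.token_to_orig_map
            or end_index not in feature.token_to_orig_map):
        return False  # index falls on [CLS]/[SEP]/question tokens
    if not feature.token_is_max_context.get(start_index, False):
        return False  # this token is scored better in another doc span
    if end_index < start_index:
        return False
    return (end_index - start_index + 1) <= max_answer_length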
+ if start_index >= len(feature.tokens): + continue + if end_index >= len(feature.tokens): + continue + if start_index not in feature.token_to_orig_map: + continue + if end_index not in feature.token_to_orig_map: + continue + if not feature.token_is_max_context.get(start_index, False): + continue + if end_index < start_index: + continue + length = end_index - start_index + 1 + if length > max_answer_length: + continue + prelim_predictions.append( + _PrelimPrediction( + feature_index=feature_index, + start_index=start_index, + end_index=end_index, + start_logit=result.start_logits[start_index], + end_logit=result.end_logits[end_index])) + + if version_2_with_negative: + prelim_predictions.append( + _PrelimPrediction( + feature_index=min_null_feature_index, + start_index=0, + end_index=0, + start_logit=null_start_logit, + end_logit=null_end_logit)) + prelim_predictions = sorted( + prelim_predictions, + key=lambda x: (x.start_logit + x.end_logit), + reverse=True) + + _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name + "NbestPrediction", ["text", "start_logit", "end_logit"]) + + seen_predictions = {} + nbest = [] + for pred in prelim_predictions: + if len(nbest) >= n_best_size: + break + feature = features[pred.feature_index] + if pred.start_index > 0: # this is a non-null prediction + tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] + orig_doc_start = feature.token_to_orig_map[pred.start_index] + orig_doc_end = feature.token_to_orig_map[pred.end_index] + orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] + tok_text = " ".join(tok_tokens) + + # De-tokenize WordPieces that have been split off. + tok_text = tok_text.replace(" ##", "") + tok_text = tok_text.replace("##", "") + + # Clean whitespace + tok_text = tok_text.strip() + tok_text = " ".join(tok_text.split()) + orig_text = " ".join(orig_tokens) + + final_text = get_final_text(tok_text, orig_text, do_lower_case) + if final_text in seen_predictions: + continue + + seen_predictions[final_text] = True + else: + final_text = "" + seen_predictions[final_text] = True + + nbest.append( + _NbestPrediction( + text=final_text, + start_logit=pred.start_logit, + end_logit=pred.end_logit)) + + # if we didn't include the empty option in the n-best, include it + if version_2_with_negative: + if "" not in seen_predictions: + nbest.append( + _NbestPrediction( + text="", start_logit=null_start_logit, + end_logit=null_end_logit)) + + # In very rare edge cases we could have no valid predictions. So we + # just create a nonce prediction in this case to avoid failure. 
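# Recapping the WordPiece de-tokenization a few lines above (standalone
# sketch, not patch code): sub-tokens marked with "##" are glued back onto
# the preceding token before whitespace cleanup.  The token list is a
# hypothetical WordPiece output.
tok_tokens = ["play", "##ing", "foot", "##ball"]
tok_text = " ".join(tok_tokens)          # "play ##ing foot ##ball"
tok_text = tok_text.replace(" ##", "")   # "playing football"
tok_text = tok_text.replace("##", "")    # strips a leading "##" if present
tok_text = " ".join(tok_text.strip().split())
print(tok_text)  # -> playing football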
+ if not nbest: + nbest.append( + _NbestPrediction(text="", start_logit=0.0, end_logit=0.0)) + + assert len(nbest) >= 1 + + total_scores = [] + best_non_null_entry = None + for entry in nbest: + total_scores.append(entry.start_logit + entry.end_logit) + if not best_non_null_entry: + if entry.text: + best_non_null_entry = entry + + probs = _compute_softmax(total_scores) + + nbest_json = [] + for (i, entry) in enumerate(nbest): + output = collections.OrderedDict() + output["text"] = entry.text + output["probability"] = probs[i] + output["start_logit"] = entry.start_logit + output["end_logit"] = entry.end_logit + nbest_json.append(output) + + assert len(nbest_json) >= 1 + + if not version_2_with_negative: + all_predictions[example.qas_id] = nbest_json[0]["text"] + else: + # predict "" iff the null score - the score of best non-null > threshold + score_diff = score_null - best_non_null_entry.start_logit - ( + best_non_null_entry.end_logit) + scores_diff_json[example.qas_id] = score_diff + if score_diff > 0: + all_predictions[example.qas_id] = "" + else: + all_predictions[example.qas_id] = best_non_null_entry.text + + all_nbest_json[example.qas_id] = nbest_json + return all_predictions + + def create_int_feature(values): + + feature = tf.train.Feature( + int64_list=tf.train.Int64List(value=list(values))) + return feature + + class InputFeatures(object): + """A single set of features of data.""" + + def __init__(self, + unique_id, + example_index, + doc_span_index, + tokens, + token_to_orig_map, + token_is_max_context, + input_ids, + input_mask, + segment_ids, + start_position=None, + end_position=None, + is_impossible=None): + self.unique_id = unique_id + self.example_index = example_index + self.doc_span_index = doc_span_index + self.tokens = tokens + self.token_to_orig_map = token_to_orig_map + self.token_is_max_context = token_is_max_context + self.input_ids = input_ids + self.input_mask = input_mask + self.segment_ids = segment_ids + self.start_position = start_position + self.end_position = end_position + self.is_impossible = is_impossible + + def _check_is_max_context(doc_spans, cur_span_index, position): + """Check if this is the 'max context' doc span for the token.""" + + # Because of the sliding window approach taken to scoring documents, a single + # token can appear in multiple documents. E.g. + # Doc: the man went to the store and bought a gallon of milk + # Span A: the man went to the + # Span B: to the store and bought + # Span C: and bought a gallon of + # ... + # + # Now the word 'bought' will have two scores from spans B and C. We only + # want to consider the score with "maximum context", which we define as + # the *minimum* of its left and right context (the *sum* of left and + # right context will always be the same, of course). + # + # In the example the maximum context for 'bought' would be span C since + # it has 1 left context and 3 right context, while span B has 4 left context + # and 0 right context. 
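# Worked numbers for the comment above (illustration only, with hypothetical
# span positions): 'bought' at token position 7 appears in span B
# (start=3, length=5: 4 tokens of left context, 0 right) and span C
# (start=6, length=5: 1 left, 3 right).  score = min(left, right)
# + 0.01 * span_length, so B scores 0.05 and C scores 1.05: C wins.
import collections
_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
spans = [_DocSpan(3, 5), _DocSpan(6, 5)]  # hypothetical spans B and C
position = 7                              # hypothetical index of 'bought'
for span in spans:
    end = span.start + span.length - 1
    left, right = position - span.start, end - position
    print(min(left, right) + 0.01 * span.length)
# -> 0.05 for span B, 1.05 for span C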
+ best_score = None + best_span_index = None + for (span_index, doc_span) in enumerate(doc_spans): + end = doc_span.start + doc_span.length - 1 + if position < doc_span.start: + continue + if position > end: + continue + num_left_context = position - doc_span.start + num_right_context = end - position + score = min(num_left_context, num_right_context) + 0.01 * doc_span.length + if best_score is None or score > best_score: + best_score = score + best_span_index = span_index + + return cur_span_index == best_span_index + + def convert_examples_to_features(examples, tokenizer, max_seq_length, + doc_stride, max_query_length, is_training): + """Loads a data file into a list of `InputBatch`s.""" + + unique_id = 1000000000 + result = [] + + for (example_index, example) in enumerate(examples): + query_tokens = tokenizer.tokenize(example.question_text) + + if len(query_tokens) > max_query_length: + query_tokens = query_tokens[0:max_query_length] + + tok_to_orig_index = [] + orig_to_tok_index = [] + all_doc_tokens = [] + for (i, token) in enumerate(example.doc_tokens): + orig_to_tok_index.append(len(all_doc_tokens)) + sub_tokens = tokenizer.tokenize(token) + for sub_token in sub_tokens: + tok_to_orig_index.append(i) + all_doc_tokens.append(sub_token) + + tok_start_position = None + tok_end_position = None + if is_training and example.is_impossible: + tok_start_position = -1 + tok_end_position = -1 + if is_training and not example.is_impossible: + tok_start_position = orig_to_tok_index[example.start_position] + if example.end_position < len(example.doc_tokens) - 1: + tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 + else: + tok_end_position = len(all_doc_tokens) - 1 + (tok_start_position, tok_end_position) = _improve_answer_span( + all_doc_tokens, tok_start_position, tok_end_position, tokenizer, + example.orig_answer_text) + + # The -3 accounts for [CLS], [SEP] and [SEP] + max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 + + # We can have documents that are longer than the maximum sequence length. + # To deal with this we do a sliding window approach, where we take chunks + # of the up to our max length with a stride of `doc_stride`. + _DocSpan = collections.namedtuple( # pylint: disable=invalid-name + "DocSpan", ["start", "length"]) + doc_spans = [] + start_offset = 0 + while start_offset < len(all_doc_tokens): + length = len(all_doc_tokens) - start_offset + if length > max_tokens_for_doc: + length = max_tokens_for_doc + doc_spans.append(_DocSpan(start=start_offset, length=length)) + if start_offset + length == len(all_doc_tokens): + break + start_offset += min(length, doc_stride) + + for (doc_span_index, doc_span) in enumerate(doc_spans): + tokens = [] + token_to_orig_map = {} + token_is_max_context = {} + segment_ids = [] + tokens.append("[CLS]") + segment_ids.append(0) + for token in query_tokens: + tokens.append(token) + segment_ids.append(0) + tokens.append("[SEP]") + segment_ids.append(0) + + for i in range(doc_span.length): + split_token_index = doc_span.start + i + token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] + + is_max_context = _check_is_max_context(doc_spans, doc_span_index, + split_token_index) + token_is_max_context[len(tokens)] = is_max_context + tokens.append(all_doc_tokens[split_token_index]) + segment_ids.append(1) + tokens.append("[SEP]") + segment_ids.append(1) + + input_ids = tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real + # tokens are attended to. 
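# Tiny illustration (not part of the patch) of the mask/padding step below,
# using a toy max_seq_length of 8 instead of the real 512 and made-up ids.
max_seq_length = 8
input_ids = [101, 2054, 2003, 102, 7592, 102]  # hypothetical token ids
input_mask = [1] * len(input_ids)
segment_ids = [0, 0, 0, 0, 1, 1]
while len(input_ids) < max_seq_length:         # zero-pad all three in step
    input_ids.append(0)
    input_mask.append(0)
    segment_ids.append(0)
print(input_mask)  # -> [1, 1, 1, 1, 1, 1, 0, 0]: only real tokens attended to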
+ input_mask = [1] * len(input_ids) + + # Zero-pad up to the sequence length. + while len(input_ids) < max_seq_length: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + + start_position = None + end_position = None + if is_training and not example.is_impossible: + # For training, if our document chunk does not contain an annotation + # we throw it out, since there is nothing to predict. + doc_start = doc_span.start + doc_end = doc_span.start + doc_span.length - 1 + out_of_span = False + if not (tok_start_position >= doc_start and tok_end_position <= doc_end): + out_of_span = True + if out_of_span: + start_position = 0 + end_position = 0 + else: + doc_offset = len(query_tokens) + 2 + start_position = tok_start_position - doc_start + doc_offset + end_position = tok_end_position - doc_start + doc_offset + + if is_training and example.is_impossible: + start_position = 0 + end_position = 0 + + if example_index < 20: + tf.logging.info("*** Example ***") + tf.logging.info("unique_id: %s" % (unique_id)) + tf.logging.info("example_index: %s" % (example_index)) + tf.logging.info("doc_span_index: %s" % (doc_span_index)) + tf.logging.info("tokens: %s" % " ".join( + [tokenization.printable_text(x) for x in tokens])) + tf.logging.info("token_to_orig_map: %s" % " ".join( + ["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)])) + tf.logging.info("token_is_max_context: %s" % " ".join([ + "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context) + ])) + tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) + tf.logging.info( + "input_mask: %s" % " ".join([str(x) for x in input_mask])) + tf.logging.info( + "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) + if is_training and example.is_impossible: + tf.logging.info("impossible example") + if is_training and not example.is_impossible: + answer_text = " ".join(tokens[start_position:(end_position + 1)]) + tf.logging.info("start_position: %d" % (start_position)) + tf.logging.info("end_position: %d" % (end_position)) + tf.logging.info( + "answer: %s" % (tokenization.printable_text(answer_text))) + + feature = InputFeatures( + unique_id=unique_id, + example_index=example_index, + doc_span_index=doc_span_index, + tokens=tokens, + token_to_orig_map=token_to_orig_map, + token_is_max_context=token_is_max_context, + input_ids=input_ids, + input_mask=input_mask, + segment_ids=segment_ids, + start_position=start_position, + end_position=end_position, + is_impossible=example.is_impossible) + + # Run callback + + result.append(feature) + unique_id += 1 + return result + + class SquadExample(object): + + def __init__(self, + qas_id, + question_text, + doc_tokens, + orig_answer_text=None, + start_position=None, + end_position=None, + is_impossible=False): + self.qas_id = qas_id + self.question_text = question_text + self.doc_tokens = doc_tokens + self.orig_answer_text = orig_answer_text + self.start_position = start_position + self.end_position = end_position + self.is_impossible = is_impossible + + def __str__(self): + return self.__repr__() + + def __repr__(self): + s = "" + s += "qas_id: %s" % (tokenization.printable_text(self.qas_id)) + s += ", question_text: %s" % ( + tokenization.printable_text(self.question_text)) + s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) + if self.start_position: + s += ", start_position: %d" % (self.start_position) + if 
self.end_position is not None: + s += ", end_position: %d" % (self.end_position) + if self.is_impossible: + s += ", is_impossible: %r" % (self.is_impossible) + return s + + def read_squad_examples(input_file, is_training): + """Read a SQuAD json file into a list of SquadExample.""" + + input_data = json.loads(input_file)["data"] + + def is_whitespace(c): + if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: + return True + return False + + examples = [] + for entry in input_data: + for paragraph in entry["paragraphs"]: + paragraph_text = paragraph["context"] + doc_tokens = [] + char_to_word_offset = [] + prev_is_whitespace = True + for c in paragraph_text: + if is_whitespace(c): + prev_is_whitespace = True + else: + if prev_is_whitespace: + doc_tokens.append(c) + else: + doc_tokens[-1] += c + prev_is_whitespace = False + char_to_word_offset.append(len(doc_tokens) - 1) + + for qa in paragraph["qas"]: + qas_id = qa["id"] + question_text = qa["question"] + start_position = None + end_position = None + orig_answer_text = None + is_impossible = False + if is_training: + + if (len(qa["answers"]) != 1) and (not is_impossible): + raise ValueError( + "For training, each question should have exactly 1 answer.") + if not is_impossible: + answer = qa["answers"][0] + orig_answer_text = answer["text"] + answer_offset = answer["answer_start"] + answer_length = len(orig_answer_text) + start_position = char_to_word_offset[answer_offset] + end_position = char_to_word_offset[answer_offset + answer_length - 1] + # Only add answers where the text can be exactly recovered from the + # document. If this CAN'T happen it's likely due to weird Unicode + # stuff so we will just skip the example. + # + # Note that this means for training mode, every example is NOT + # guaranteed to be preserved. + actual_text = " ".join( + doc_tokens[start_position:(end_position + 1)]) + cleaned_answer_text = " ".join( + tokenization.whitespace_tokenize(orig_answer_text)) + if actual_text.find(cleaned_answer_text) == -1: + tf.logging.warning("Could not find answer: '%s' vs. 
'%s'", + actual_text, cleaned_answer_text) + continue + else: + start_position = -1 + end_position = -1 + orig_answer_text = "" + + example = SquadExample( + qas_id=qas_id, + question_text=question_text, + doc_tokens=doc_tokens, + orig_answer_text=orig_answer_text, + start_position=start_position, + end_position=end_position, + is_impossible=is_impossible) + examples.append(example) + + return examples + + def get_result(title, text, question, url): + + data = make_json(title, text, question) + + examples = read_squad_examples(data, False) + + predict_files = convert_examples_to_features( + examples=examples, + tokenizer=tokenizer, + max_seq_length=512, + doc_stride=128, + max_query_length=100, + is_training=False, + ) + + headers = {"content-type": "application/json"} + all_results = [] + for predict_file in predict_files: + features = {} + features["unique_ids"] = predict_file.unique_id + features["input_mask"] = predict_file.input_mask + features["segment_ids"] = predict_file.segment_ids + features["input_ids"] = predict_file.input_ids + data_list = [] + data_list.append(features) + + data = json.dumps({"instances": data_list}) + + json_response = requests.post(url, data=data, headers=headers) + + x = json.loads(json_response.text) + + all_results.append( + RawResult( + unique_id=predict_file.unique_id, + start_logits=x['predictions'][0]['start_logits'], + end_logits=x['predictions'][0]['end_logits'])) + + result = write_predictions(examples, predict_files, all_results, 20, 64, True) + return result + + return get_result(title, text, question, url) diff --git a/nlp/scripts/global_setting.py b/nlp/scripts/global_setting.py new file mode 100644 index 0000000..51dfec1 --- /dev/null +++ b/nlp/scripts/global_setting.py @@ -0,0 +1,31 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +# import collections +# import math +# import modeling +# import optimization +import tokenization +# import six +# import tensorflow as tf +# import os + +# Global variables + +# GPU number, default: -1, means not used +CUDA_VISIBLE_DEVICES = "2" + +# Questions to be trained/predicted +questions = ['Communication Service Name', 'Max Number of UEs', 'Data Rate Downlink', 'Latency', 'Data Rate Uplink', 'Resource Sharing Level', 'Mobility', 'Area'] + +# Configuration file +FLAGS_bert_config_file = '/home/run/uncased_L-12_H-768_A-12/bert_config.json' +FLAGS_vocab_file = '/home/run/uncased_L-12_H-768_A-12/vocab.txt' +FLAGS_init_checkpoint_squad = '/home/run/uncased_L-12_H-768_A-12/bert_model.ckpt' + +max_seq_length = 512 + + +tokenizer_ch = tokenization.FullTokenizer(vocab_file=FLAGS_vocab_file, do_lower_case=True) diff --git a/nlp/scripts/load_model.sh b/nlp/scripts/load_model.sh new file mode 100755 index 0000000..1961f6a --- /dev/null +++ b/nlp/scripts/load_model.sh @@ -0,0 +1,6 @@ +path=$1 +use_gpu=$2 +export CUDA_VISIBLE_DEVICES=$use_gpu +netstat -nap | grep 8502 | awk 'NR==1{printf $7}' | sed 's/\([0-9]*\).*/\1/g' | xargs kill -9 +sleep 5 +nohup tensorflow_model_server --port=8500 --rest_api_port=8502 --model_name=predict --model_base_path=$path > server.log 2>&1 & \ No newline at end of file diff --git a/nlp/test_1.py b/nlp/test_1.py new file mode 100644 index 0000000..3e50176 --- /dev/null +++ b/nlp/test_1.py @@ -0,0 +1,24 @@ +# -*- coding:utf-8 -*- +import pytest + + +@pytest.fixture(scope='function') +def setup_function(request): + def teardown_function(): + print("teardown_function called.") + request.addfinalizer(teardown_function) # 
this nested function does the teardown work + print('setup_function called.') + + +@pytest.fixture(scope='module') +def setup_module(request): + def teardown_module(): + print("teardown_module called.") + request.addfinalizer(teardown_module) + print('setup_module called.') + + +# @pytest.mark.website +def test_1(setup_function): + print('Test_1 called.') + assert 1 == 1 diff --git a/nlp/tox.ini b/nlp/tox.ini new file mode 100644 index 0000000..2b6716e --- /dev/null +++ b/nlp/tox.ini @@ -0,0 +1,26 @@ +# content of: tox.ini, put in same dir as setup.py +[tox] +envlist = py36,pep8,cov +skipsdist = true + +[flake8] +ignore = E501,E722 +exclude = ./venv-tox,./.tox,./venv,./docs + +[testenv:pep8] +deps = flake8 +commands = flake8 + +[testenv] +deps = -r{toxinidir}/requirements.txt +commands = pytest + +[testenv:py36] +commands = + {[testenv]commands} + +[testenv:cov] +deps = pytest + pytest-cov +commands = pytest --cov-report=html + diff --git a/pom.xml b/pom.xml index a08b3f8..2ee21be 100644 --- a/pom.xml +++ b/pom.xml @@ -1,113 +1,63 @@ - + + --> + 4.0.0 org.onap.oparent oparent - 2.1.0 + 3.0.1 + - org.onap.usecase-ui - usecase-ui-nlp + + org.onap.usecase-ui.nlp + usecase-ui-nlp-parent 1.0.0-SNAPSHOT - usecase-ui-nlp pom - usecase-ui nlp - 4.0.0 + usecase-ui-nlp-parent + parent project for usecase-ui nlp + UTF-8 - . - xunit-results.xml - coverage.xml - py - python - **/**.py - **/tests/**.py,**/test*.py + UTF-8 + ${project.version} + yyyyMMdd'T'HHmm + https://nexus.onap.org + content/repositories/snapshots/ + content/repositories/releases/ + content/sites/site/${project.groupId}/${project.artifactId}/${project.version}/ + - - - - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - ${project.basedir}/mvn-phase-script.sh - - - ${project.groupId} - ${project.artifactId} - ${project.version} - - - - - - - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - clean phase script - clean - - exec - - - - __ - clean - - - - - test script - test - - exec - - - - __ - test - - - - - - - maven-assembly-plugin - - false - - assembly.xml - - - - - make-assembly - package - - single - - - - - - + + + + + nlp + standalone + diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 024d1b4..0000000 --- a/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -pytest -entrypoints==0.3 -Flask==1.1.1 -coverage diff --git a/scripts/api_squad.py b/scripts/api_squad.py deleted file mode 100644 index f29a74b..0000000 --- a/scripts/api_squad.py +++ /dev/null @@ -1,1028 +0,0 @@ -# coding=utf-8 -# squad interface -# Required parameters -# FLAGS_output_dir :the output path of the model training during training process, the output of the trained model, etc.; the output path of the model prediction during predicting process -# FLAGS_init_checkpoint_squad : model initialization path, use bert pre-trained model for training; use the output path during training for prediction -# FLAGS_predict_file : the file to be predicted, csv file -# FLAGS_train_file : file to be trained, csv file -# FLAGS_do_predict : whether to predict or not -# FLAGS_do_train : whether to train or not -# FLAGS_train_batch_size : the batch_size for training, default : 16 -# FLAGS_predict_batch_size : the batch_size when predicting, default: 8 -# FLAGS_learning_rate : the learning_rate at training time, default: 5e-5 -# FLAGS_num_train_epochs : epochs at training time, default: 3 -# FLAGS_max_answer_length : the maximum length of the answer, default: 100 characters -# FLAGS_max_query_length : the maximum length of the question, default: 64 -# 
FLAGS_version_2_with_negative : whether there is no answer to the question, default false, must be set to False when reasoning - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import json -import math -import modeling -import optimization -import tokenization -import six -import tensorflow as tf -import pandas as pd -from global_setting import FLAGS_init_checkpoint_squad - -FLAGS_max_seq_length = 512 -FLAGS_do_lower_case = True -FLAGS_doc_stride = 128 - - -FLAGS_save_checkpoints_steps = 1000 -FLAGS_iterations_per_loop = 1000 -FLAGS_n_best_size = 20 -FLAGS_tpu_zone = None -FLAGS_tpu_name = None -FLAGS_num_tpu_cores = 8 -FLAGS_verbose_logging = False -FLAGS_master = None -FLAGS_use_tpu = False -FLAGS_warmup_proportion = 0.1 -FLAGS_gcp_project = None -FLAGS_null_score_diff_threshold = 0.0 - - -def make_json(input_file, questions): - print(input_file) - data_train = pd.read_excel(input_file) - print(444) - data_train.fillna(0, inplace=True) - data_train.index = [i for i in range(len(data_train))] - question = questions - res = {} - res['data'] = [] - data_inside = {} - for i in data_train.index: - data_inside['title'] = 'Not available' - data_inside['paragraphs'] = [] - paragraphs_inside = {} - paragraphs_inside['context'] = data_train.loc[i, 'text'] - paragraphs_inside['qas'] = [] - for ques in question: - qas_inside = {} - qas_inside['answers'] = [] - if data_train.loc[i, ques]: - answer_inside = {} - answer_inside['text'] = str(data_train.loc[i, ques]) - answer_inside['answer_start'] = paragraphs_inside['context'].find(answer_inside['text']) - qas_inside['is_impossible'] = 0 - else: - qas_inside['is_impossible'] = 1 - answer_inside = {} - qas_inside['id'] = str(i) + ques - qas_inside['question'] = ques - qas_inside['answers'].append(answer_inside.copy()) - paragraphs_inside['qas'].append(qas_inside.copy()) - data_inside['paragraphs'].append(paragraphs_inside.copy()) - - res['data'].append(data_inside.copy()) - print('make json done') - return json.dumps(res) - - -class SquadExample(object): - """A single training/test example for simple sequence classification. - - For examples without an answer, the start and end position are -1. 
- """ - - def __init__(self, - qas_id, - question_text, - doc_tokens, - orig_answer_text=None, - start_position=None, - end_position=None, - is_impossible=False): - self.qas_id = qas_id - self.question_text = question_text - self.doc_tokens = doc_tokens - self.orig_answer_text = orig_answer_text - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - - def __str__(self): - return self.__repr__() - - def __repr__(self): - s = "" - s += "qas_id: %s" % (tokenization.printable_text(self.qas_id)) - s += ", question_text: %s" % ( - tokenization.printable_text(self.question_text)) - s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) - if self.start_position: - s += ", start_position: %d" % (self.start_position) - if self.start_position: - s += ", end_position: %d" % (self.end_position) - if self.start_position: - s += ", is_impossible: %r" % (self.is_impossible) - return s - - -class InputFeatures(object): - """A single set of features of data.""" - - def __init__(self, - unique_id, - example_index, - doc_span_index, - tokens, - token_to_orig_map, - token_is_max_context, - input_ids, - input_mask, - segment_ids, - start_position=None, - end_position=None, - is_impossible=None): - self.unique_id = unique_id - self.example_index = example_index - self.doc_span_index = doc_span_index - self.tokens = tokens - self.token_to_orig_map = token_to_orig_map - self.token_is_max_context = token_is_max_context - self.input_ids = input_ids - self.input_mask = input_mask - self.segment_ids = segment_ids - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - - -def read_squad_examples(input_file, is_training, questions, FLAGS_version_2_with_negative): - """Read a SQuAD json file into a list of SquadExample.""" - data = make_json(input_file, questions) - input_data = json.loads(data)["data"] - - def is_whitespace(c): - if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: - return True - return False - - examples = [] - for entry in input_data: - for paragraph in entry["paragraphs"]: - paragraph_text = paragraph["context"] - doc_tokens = [] - char_to_word_offset = [] - prev_is_whitespace = True - for c in paragraph_text: - if is_whitespace(c): - prev_is_whitespace = True - else: - if prev_is_whitespace: - doc_tokens.append(c) - else: - doc_tokens[-1] += c - prev_is_whitespace = False - char_to_word_offset.append(len(doc_tokens) - 1) - - for qa in paragraph["qas"]: - qas_id = qa["id"] - question_text = qa["question"] - start_position = None - end_position = None - orig_answer_text = None - is_impossible = False - if is_training: - - if FLAGS_version_2_with_negative: - is_impossible = qa["is_impossible"] - if (len(qa["answers"]) != 1) and (not is_impossible): - raise ValueError( - "For training, each question should have exactly 1 answer.") - if not is_impossible: - answer = qa["answers"][0] - orig_answer_text = answer["text"] - answer_offset = answer["answer_start"] - answer_length = len(orig_answer_text) - start_position = char_to_word_offset[answer_offset] - end_position = char_to_word_offset[answer_offset + answer_length - 1] - # Only add answers where the text can be exactly recovered from the - # document. If this CAN'T happen it's likely due to weird Unicode - # stuff so we will just skip the example. - # - # Note that this means for training mode, every example is NOT - # guaranteed to be preserved. 
- actual_text = " ".join( - doc_tokens[start_position:(end_position + 1)]) - cleaned_answer_text = " ".join( - tokenization.whitespace_tokenize(orig_answer_text)) - if actual_text.find(cleaned_answer_text) == -1: - tf.logging.warning("Could not find answer: '%s' vs. '%s'", - actual_text, cleaned_answer_text) - continue - else: - start_position = -1 - end_position = -1 - orig_answer_text = "" - - example = SquadExample( - qas_id=qas_id, - question_text=question_text, - doc_tokens=doc_tokens, - orig_answer_text=orig_answer_text, - start_position=start_position, - end_position=end_position, - is_impossible=is_impossible) - examples.append(example) - - return examples - - -def convert_examples_to_features(examples, tokenizer, max_seq_length, - doc_stride, max_query_length, is_training, - output_fn): - """Loads a data file into a list of `InputBatch`s.""" - - unique_id = 1000000000 - - for (example_index, example) in enumerate(examples): - query_tokens = tokenizer.tokenize(example.question_text) - - if len(query_tokens) > max_query_length: - query_tokens = query_tokens[0:max_query_length] - - tok_to_orig_index = [] - orig_to_tok_index = [] - all_doc_tokens = [] - for (i, token) in enumerate(example.doc_tokens): - orig_to_tok_index.append(len(all_doc_tokens)) - sub_tokens = tokenizer.tokenize(token) - for sub_token in sub_tokens: - tok_to_orig_index.append(i) - all_doc_tokens.append(sub_token) - - tok_start_position = None - tok_end_position = None - if is_training and example.is_impossible: - tok_start_position = -1 - tok_end_position = -1 - if is_training and not example.is_impossible: - tok_start_position = orig_to_tok_index[example.start_position] - if example.end_position < len(example.doc_tokens) - 1: - tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 - else: - tok_end_position = len(all_doc_tokens) - 1 - (tok_start_position, tok_end_position) = _improve_answer_span( - all_doc_tokens, tok_start_position, tok_end_position, tokenizer, - example.orig_answer_text) - - # The -3 accounts for [CLS], [SEP] and [SEP] - max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 - - # We can have documents that are longer than the maximum sequence length. - # To deal with this we do a sliding window approach, where we take chunks - # of the up to our max length with a stride of `doc_stride`. 
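# Quick illustration (not from the patch) of the sliding-window split coded
# below, with toy sizes: 10 document tokens, max_tokens_for_doc=4,
# doc_stride=2.  Each window overlaps the previous one by length - stride.
import collections
_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
all_doc_tokens, max_tokens_for_doc, doc_stride = list(range(10)), 4, 2
doc_spans, start_offset = [], 0
while start_offset < len(all_doc_tokens):
    length = min(len(all_doc_tokens) - start_offset, max_tokens_for_doc)
    doc_spans.append(_DocSpan(start=start_offset, length=length))
    if start_offset + length == len(all_doc_tokens):
        break
    start_offset += min(length, doc_stride)
print(doc_spans)
# -> spans starting at 0, 2, 4, 6 (length 4 each): overlapping windows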
- _DocSpan = collections.namedtuple( # pylint: disable=invalid-name - "DocSpan", ["start", "length"]) - doc_spans = [] - start_offset = 0 - while start_offset < len(all_doc_tokens): - length = len(all_doc_tokens) - start_offset - if length > max_tokens_for_doc: - length = max_tokens_for_doc - doc_spans.append(_DocSpan(start=start_offset, length=length)) - if start_offset + length == len(all_doc_tokens): - break - start_offset += min(length, doc_stride) - - for (doc_span_index, doc_span) in enumerate(doc_spans): - tokens = [] - token_to_orig_map = {} - token_is_max_context = {} - segment_ids = [] - tokens.append("[CLS]") - segment_ids.append(0) - for token in query_tokens: - tokens.append(token) - segment_ids.append(0) - tokens.append("[SEP]") - segment_ids.append(0) - - for i in range(doc_span.length): - split_token_index = doc_span.start + i - token_to_orig_map[len( - tokens)] = tok_to_orig_index[split_token_index] - - is_max_context = _check_is_max_context(doc_spans, doc_span_index, - split_token_index) - token_is_max_context[len(tokens)] = is_max_context - tokens.append(all_doc_tokens[split_token_index]) - segment_ids.append(1) - tokens.append("[SEP]") - segment_ids.append(1) - - input_ids = tokenizer.convert_tokens_to_ids(tokens) - - # The mask has 1 for real tokens and 0 for padding tokens. Only real - # tokens are attended to. - input_mask = [1] * len(input_ids) - - # Zero-pad up to the sequence length. - while len(input_ids) < max_seq_length: - input_ids.append(0) - input_mask.append(0) - segment_ids.append(0) - - assert len(input_ids) == max_seq_length - assert len(input_mask) == max_seq_length - assert len(segment_ids) == max_seq_length - - start_position = None - end_position = None - if is_training and not example.is_impossible: - # For training, if our document chunk does not contain an annotation - # we throw it out, since there is nothing to predict. 
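# Illustrative numbers (not patch code) for the remapping done below: with a
# query of 6 tokens, the document part of the packed input starts after
# [CLS] + query + [SEP], i.e. at doc_offset = 6 + 2 = 8.  A token answer span
# (tok_start_position=12, tok_end_position=14) inside a doc span starting at
# doc_start=10 therefore maps to positions 12 - 10 + 8 = 10 through
# 14 - 10 + 8 = 12 in the packed sequence; an answer falling outside the
# current window is marked out_of_span and the chunk gets (0, 0).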
- doc_start = doc_span.start - doc_end = doc_span.start + doc_span.length - 1 - out_of_span = False - if not (tok_start_position >= doc_start and tok_end_position <= doc_end): - out_of_span = True - if out_of_span: - start_position = 0 - end_position = 0 - else: - doc_offset = len(query_tokens) + 2 - start_position = tok_start_position - doc_start + doc_offset - end_position = tok_end_position - doc_start + doc_offset - - if is_training and example.is_impossible: - start_position = 0 - end_position = 0 - - if example_index < 20: - tf.logging.info("*** Example ***") - tf.logging.info("unique_id: %s" % (unique_id)) - tf.logging.info("example_index: %s" % (example_index)) - tf.logging.info("doc_span_index: %s" % (doc_span_index)) - tf.logging.info("tokens: %s" % " ".join( - [tokenization.printable_text(x) for x in tokens])) - tf.logging.info("token_to_orig_map: %s" % " ".join( - ["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)])) - tf.logging.info("token_is_max_context: %s" % " ".join([ - "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context) - ])) - tf.logging.info("input_ids: %s" % - " ".join([str(x) for x in input_ids])) - tf.logging.info( - "input_mask: %s" % " ".join([str(x) for x in input_mask])) - tf.logging.info( - "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) - if is_training and example.is_impossible: - tf.logging.info("impossible example") - if is_training and not example.is_impossible: - answer_text = " ".join( - tokens[start_position:(end_position + 1)]) - tf.logging.info("start_position: %d" % (start_position)) - tf.logging.info("end_position: %d" % (end_position)) - tf.logging.info( - "answer: %s" % (tokenization.printable_text(answer_text))) - - feature = InputFeatures( - unique_id=unique_id, - example_index=example_index, - doc_span_index=doc_span_index, - tokens=tokens, - token_to_orig_map=token_to_orig_map, - token_is_max_context=token_is_max_context, - input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - start_position=start_position, - end_position=end_position, - is_impossible=example.is_impossible) - - # Run callback - output_fn(feature) - - unique_id += 1 - - -def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, - orig_answer_text): - """Returns tokenized answer spans that better match the annotated answer.""" - - # The SQuAD annotations are character based. We first project them to - # whitespace-tokenized words. But then after WordPiece tokenization, we can - # often find a "better match". For example: - # - # Question: What year was John Smith born? - # Context: The leader was John Smith (1895-1943). - # Answer: 1895 - # - # The original whitespace-tokenized answer will be "(1895-1943).". However - # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match - # the exact answer, 1895. - # - # However, this is not always possible. Consider the following: - # - # Question: What country is the top exporter of electornics? - # Context: The Japanese electronics industry is the lagest in the world. - # Answer: Japan - # - # In this case, the annotator chose "Japan" as a character sub-span of - # the word "Japanese". Since our WordPiece tokenizer does not split - # "Japanese", we just use "Japanese" as the annotation. This is fairly rare - # in SQuAD, but does happen. 
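# Minimal sketch (not from the patch) of the search implemented below: scan
# all sub-spans of the whitespace-token span and return the first whose join
# equals the tokenized answer, recovering "1895" out of "(1895-1943).".  Here
# the WordPieces and tokenized answer are precomputed strings instead of
# calls to the tokenizer.
def improve_answer_span(doc_tokens, input_start, input_end, tok_answer_text):
    for new_start in range(input_start, input_end + 1):
        for new_end in range(input_end, new_start - 1, -1):
            text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
            if text_span == tok_answer_text:
                return (new_start, new_end)
    return (input_start, input_end)  # fall back to the original span

doc_tokens = ["(", "1895", "-", "1943", ")", "."]  # WordPieces of "(1895-1943)."
print(improve_answer_span(doc_tokens, 0, 5, "1895"))  # -> (1, 1)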
- tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) - - for new_start in range(input_start, input_end + 1): - for new_end in range(input_end, new_start - 1, -1): - text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) - if text_span == tok_answer_text: - return (new_start, new_end) - - return (input_start, input_end) - - -def _check_is_max_context(doc_spans, cur_span_index, position): - """Check if this is the 'max context' doc span for the token.""" - - # Because of the sliding window approach taken to scoring documents, a single - # token can appear in multiple documents. E.g. - # Doc: the man went to the store and bought a gallon of milk - # Span A: the man went to the - # Span B: to the store and bought - # Span C: and bought a gallon of - # ... - # - # Now the word 'bought' will have two scores from spans B and C. We only - # want to consider the score with "maximum context", which we define as - # the *minimum* of its left and right context (the *sum* of left and - # right context will always be the same, of course). - # - # In the example the maximum context for 'bought' would be span C since - # it has 1 left context and 3 right context, while span B has 4 left context - # and 0 right context. - best_score = None - best_span_index = None - for (span_index, doc_span) in enumerate(doc_spans): - end = doc_span.start + doc_span.length - 1 - if position < doc_span.start: - continue - if position > end: - continue - num_left_context = position - doc_span.start - num_right_context = end - position - score = min(num_left_context, num_right_context) + \ - 0.01 * doc_span.length - if best_score is None or score > best_score: - best_score = score - best_span_index = span_index - - return cur_span_index == best_span_index - - -def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, - use_one_hot_embeddings): - """Creates a classification model.""" - model = modeling.BertModel( - config=bert_config, - is_training=is_training, - input_ids=input_ids, - input_mask=input_mask, - token_type_ids=segment_ids, - use_one_hot_embeddings=use_one_hot_embeddings) - - final_hidden = model.get_sequence_output() - - final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3) - batch_size = final_hidden_shape[0] - seq_length = final_hidden_shape[1] - hidden_size = final_hidden_shape[2] - - output_weights = tf.get_variable( - "cls/squad/output_weights", [2, hidden_size], - initializer=tf.truncated_normal_initializer(stddev=0.02)) - - output_bias = tf.get_variable( - "cls/squad/output_bias", [2], initializer=tf.zeros_initializer()) - - final_hidden_matrix = tf.reshape(final_hidden, - [batch_size * seq_length, hidden_size]) - logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True) - logits = tf.nn.bias_add(logits, output_bias) - - logits = tf.reshape(logits, [batch_size, seq_length, 2]) - logits = tf.transpose(logits, [2, 0, 1]) - - unstacked_logits = tf.unstack(logits, axis=0) - - (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1]) - - return (start_logits, end_logits) - - -def model_fn_builder(bert_config, init_checkpoint, learning_rate, - num_train_steps, num_warmup_steps, use_tpu, - use_one_hot_embeddings): - """Returns `model_fn` closure for TPUEstimator.""" - - def model_fn(features, labels, mode, params): # pylint: disable=unused-argument - """The `model_fn` for TPUEstimator.""" - - tf.logging.info("*** Features ***") - for name in sorted(features.keys()): - tf.logging.info(" name = %s, shape = %s" % - (name, 
features[name].shape)) - - input_ids = features["input_ids"] - input_mask = features["input_mask"] - segment_ids = features["segment_ids"] - - is_training = (mode == tf.estimator.ModeKeys.TRAIN) - - (start_logits, end_logits) = create_model( - bert_config=bert_config, - is_training=is_training, - input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - use_one_hot_embeddings=use_one_hot_embeddings) - - tvars = tf.trainable_variables() - - initialized_variable_names = {} - scaffold_fn = None - if init_checkpoint: - (assignment_map, initialized_variable_names - ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) - if use_tpu: - - def tpu_scaffold(): - tf.train.init_from_checkpoint( - init_checkpoint, assignment_map) - return tf.train.Scaffold() - - scaffold_fn = tpu_scaffold - else: - tf.train.init_from_checkpoint(init_checkpoint, assignment_map) - - tf.logging.info("**** Trainable Variables ****") - for var in tvars: - init_string = "" - if var.name in initialized_variable_names: - init_string = ", *INIT_FROM_CKPT*" - tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, - init_string) - - output_spec = None - if mode == tf.estimator.ModeKeys.TRAIN: - seq_length = modeling.get_shape_list(input_ids)[1] - - def compute_loss(logits, positions): - one_hot_positions = tf.one_hot( - positions, depth=seq_length, dtype=tf.float32) - log_probs = tf.nn.log_softmax(logits, axis=-1) - loss = -tf.reduce_mean( - tf.reduce_sum(one_hot_positions * log_probs, axis=-1)) - return loss - - start_positions = features["start_positions"] - end_positions = features["end_positions"] - - start_loss = compute_loss(start_logits, start_positions) - end_loss = compute_loss(end_logits, end_positions) - - total_loss = (start_loss + end_loss) / 2.0 - - train_op = optimization.create_optimizer( - total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) - - output_spec = tf.contrib.tpu.TPUEstimatorSpec( - mode=mode, - loss=total_loss, - train_op=train_op, - scaffold_fn=scaffold_fn) - elif mode == tf.estimator.ModeKeys.PREDICT: - predictions = { - # "unique_ids": unique_ids, - "start_logits": start_logits, - "end_logits": end_logits, - } - output_spec = tf.contrib.tpu.TPUEstimatorSpec( - mode=mode, predictions=predictions, scaffold_fn=scaffold_fn) - else: - raise ValueError( - "Only TRAIN and PREDICT modes are supported: %s" % (mode)) - - return output_spec - - return model_fn - - -def input_fn_builder(input_file, seq_length, is_training, drop_remainder): - """Creates an `input_fn` closure to be passed to TPUEstimator.""" - - name_to_features = { - "unique_ids": tf.FixedLenFeature([], tf.int64), - "input_ids": tf.FixedLenFeature([seq_length], tf.int64), - "input_mask": tf.FixedLenFeature([seq_length], tf.int64), - "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), - } - - if is_training: - name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64) - name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64) - - def _decode_record(record, name_to_features): - """Decodes a record to a TensorFlow example.""" - example = tf.parse_single_example(record, name_to_features) - - # tf.Example only supports tf.int64, but the TPU only supports tf.int32. - # So cast all int64 to int32. 
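# Toy illustration (not from the patch) of the cast below: tf.Example features
# decode as int64, but TPUs only handle int32, so every int64 tensor in the
# decoded example dict is narrowed.  Uses the TF 1.x API, matching this file.
import tensorflow as tf
example = {"input_ids": tf.constant([101, 102], dtype=tf.int64)}
for name in list(example.keys()):
    t = example[name]
    if t.dtype == tf.int64:
        example[name] = tf.to_int32(t)  # TF 1.x alias of tf.cast(t, tf.int32)
print(example["input_ids"].dtype)  # -> <dtype: 'int32'>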
- for name in list(example.keys()): - t = example[name] - if t.dtype == tf.int64: - t = tf.to_int32(t) - example[name] = t - - return example - - def input_fn(params): - """The actual input function.""" - batch_size = params["batch_size"] - - # For training, we want a lot of parallel reading and shuffling. - # For eval, we want no shuffling and parallel reading doesn't matter. - d = tf.data.TFRecordDataset(input_file) - if is_training: - d = d.repeat() - d = d.shuffle(buffer_size=100) - - d = d.apply( - tf.contrib.data.map_and_batch( - lambda record: _decode_record(record, name_to_features), - batch_size=batch_size, - drop_remainder=drop_remainder)) - - return d - - return input_fn - - -RawResult = collections.namedtuple("RawResult", - ["unique_id", "start_logits", "end_logits"]) - - -def write_predictions(all_examples, all_features, all_results, n_best_size, - max_answer_length, do_lower_case, output_prediction_file, - output_nbest_file, output_null_log_odds_file, FLAGS_version_2_with_negative): - """Write final predictions to the json file and log-odds of null if needed.""" - tf.logging.info("Writing predictions to: %s" % (output_prediction_file)) - tf.logging.info("Writing nbest to: %s" % (output_nbest_file)) - - example_index_to_features = collections.defaultdict(list) - for feature in all_features: - example_index_to_features[feature.example_index].append(feature) - - unique_id_to_result = {} - for result in all_results: - unique_id_to_result[result.unique_id] = result - - _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name - "PrelimPrediction", - ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) - - all_predictions = collections.OrderedDict() - all_nbest_json = collections.OrderedDict() - - for (example_index, example) in enumerate(all_examples): - features = example_index_to_features[example_index] - - prelim_predictions = [] - # keep track of the minimum score of null start+end of position 0 - score_null = 1000000 # large and positive - for (feature_index, feature) in enumerate(features): - result = unique_id_to_result[feature.unique_id] - start_indexes = _get_best_indexes(result.start_logits, n_best_size) - end_indexes = _get_best_indexes(result.end_logits, n_best_size) - # if we could have irrelevant answers, get the min score of irrelevant - if FLAGS_version_2_with_negative: - feature_null_score = result.start_logits[0] + \ - result.end_logits[0] - if feature_null_score < score_null: - score_null = feature_null_score - for start_index in start_indexes: - for end_index in end_indexes: - # We could hypothetically create invalid predictions, e.g., predict - # that the start of the span is in the question. We throw out all - # invalid predictions. 
- if start_index >= len(feature.tokens): - continue - if end_index >= len(feature.tokens): - continue - if start_index not in feature.token_to_orig_map: - continue - if end_index not in feature.token_to_orig_map: - continue - if not feature.token_is_max_context.get(start_index, False): - continue - if end_index < start_index: - continue - length = end_index - start_index + 1 - if length > max_answer_length: - continue - prelim_predictions.append( - _PrelimPrediction( - feature_index=feature_index, - start_index=start_index, - end_index=end_index, - start_logit=result.start_logits[start_index], - end_logit=result.end_logits[end_index])) - - prelim_predictions = sorted( - prelim_predictions, - key=lambda x: (x.start_logit + x.end_logit), - reverse=True) - - _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name - "NbestPrediction", ["text", "start_logit", "end_logit"]) - - seen_predictions = {} - nbest = [] - for pred in prelim_predictions: - if len(nbest) >= n_best_size: - break - feature = features[pred.feature_index] - if pred.start_index > 0: # this is a non-null prediction - tok_tokens = feature.tokens[pred.start_index:( - pred.end_index + 1)] - orig_doc_start = feature.token_to_orig_map[pred.start_index] - orig_doc_end = feature.token_to_orig_map[pred.end_index] - orig_tokens = example.doc_tokens[orig_doc_start:( - orig_doc_end + 1)] - tok_text = " ".join(tok_tokens) - - # De-tokenize WordPieces that have been split off. - tok_text = tok_text.replace(" ##", "") - tok_text = tok_text.replace("##", "") - - # Clean whitespace - tok_text = tok_text.strip() - tok_text = " ".join(tok_text.split()) - orig_text = " ".join(orig_tokens) - - final_text = get_final_text(tok_text, orig_text, do_lower_case) - if final_text in seen_predictions: - continue - - seen_predictions[final_text] = True - else: - final_text = "" - seen_predictions[final_text] = True - - nbest.append( - _NbestPrediction( - text=final_text, - start_logit=pred.start_logit, - end_logit=pred.end_logit)) - - # In very rare edge cases we could have no valid predictions. So we - # just create a nonce prediction in this case to avoid failure. - if not nbest: - nbest.append( - _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) - - assert len(nbest) >= 1 - - total_scores = [] - best_non_null_entry = None - for entry in nbest: - total_scores.append(entry.start_logit + entry.end_logit) - if not best_non_null_entry: - if entry.text: - best_non_null_entry = entry - - probs = _compute_softmax(total_scores) - - nbest_json = [] - for (i, entry) in enumerate(nbest): - output = collections.OrderedDict() - output["text"] = entry.text - output["probability"] = probs[i] - output["start_logit"] = entry.start_logit - output["end_logit"] = entry.end_logit - nbest_json.append(output) - - assert len(nbest_json) >= 1 - - all_predictions[example.qas_id] = nbest_json[0]["text"] - - all_nbest_json[example.qas_id] = nbest_json - - with tf.gfile.GFile(output_prediction_file, "w") as writer: - writer.write(json.dumps(all_predictions, indent=4) + "\n") - - -def get_final_text(pred_text, orig_text, do_lower_case): - """Project the tokenized prediction back to the original text.""" - - # When we created the data, we kept track of the alignment between original - # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So - # now `orig_text` contains the span of our original text corresponding to the - # span that we predicted. 
- # - # However, `orig_text` may contain extra characters that we don't want in - # our prediction. - # - # For example, let's say: - # pred_text = steve smith - # orig_text = Steve Smith's - # - # We don't want to return `orig_text` because it contains the extra "'s". - # - # We don't want to return `pred_text` because it's already been normalized - # (the SQuAD eval script also does punctuation stripping/lower casing but - # our tokenizer does additional normalization like stripping accent - # characters). - # - # What we really want to return is "Steve Smith". - # - # Therefore, we have to apply a semi-complicated alignment heruistic between - # `pred_text` and `orig_text` to get a character-to-charcter alignment. This - # can fail in certain cases in which case we just return `orig_text`. - - def _strip_spaces(text): - ns_chars = [] - ns_to_s_map = collections.OrderedDict() - for (i, c) in enumerate(text): - if c == " ": - continue - ns_to_s_map[len(ns_chars)] = i - ns_chars.append(c) - ns_text = "".join(ns_chars) - return (ns_text, ns_to_s_map) - - # We first tokenize `orig_text`, strip whitespace from the result - # and `pred_text`, and check if they are the same length. If they are - # NOT the same length, the heuristic has failed. If they are the same - # length, we assume the characters are one-to-one aligned. - tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case) - - tok_text = " ".join(tokenizer.tokenize(orig_text)) - - start_position = tok_text.find(pred_text) - if start_position == -1: - if FLAGS_verbose_logging: - tf.logging.info( - "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) - return orig_text - end_position = start_position + len(pred_text) - 1 - - (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) - (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) - - if len(orig_ns_text) != len(tok_ns_text): - if FLAGS_verbose_logging: - tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'", - orig_ns_text, tok_ns_text) - return orig_text - - # We then project the characters in `pred_text` back to `orig_text` using - # the character-to-character alignment. 
- tok_s_to_ns_map = {} - for (i, tok_index) in six.iteritems(tok_ns_to_s_map): - tok_s_to_ns_map[tok_index] = i - - orig_start_position = None - if start_position in tok_s_to_ns_map: - ns_start_position = tok_s_to_ns_map[start_position] - if ns_start_position in orig_ns_to_s_map: - orig_start_position = orig_ns_to_s_map[ns_start_position] - - if orig_start_position is None: - if FLAGS_verbose_logging: - tf.logging.info("Couldn't map start position") - return orig_text - - orig_end_position = None - if end_position in tok_s_to_ns_map: - ns_end_position = tok_s_to_ns_map[end_position] - if ns_end_position in orig_ns_to_s_map: - orig_end_position = orig_ns_to_s_map[ns_end_position] - - if orig_end_position is None: - if FLAGS_verbose_logging: - tf.logging.info("Couldn't map end position") - return orig_text - - output_text = orig_text[orig_start_position:(orig_end_position + 1)] - return output_text - - -def _get_best_indexes(logits, n_best_size): - """Get the n-best logits from a list.""" - index_and_score = sorted( - enumerate(logits), key=lambda x: x[1], reverse=True) - - best_indexes = [] - for i in range(len(index_and_score)): - if i >= n_best_size: - break - best_indexes.append(index_and_score[i][0]) - return best_indexes - - -def _compute_softmax(scores): - """Compute softmax probability over raw logits.""" - if not scores: - return [] - - max_score = None - for score in scores: - if max_score is None or score > max_score: - max_score = score - - exp_scores = [] - total_sum = 0.0 - for score in scores: - x = math.exp(score - max_score) - exp_scores.append(x) - total_sum += x - - probs = [] - for score in exp_scores: - probs.append(score / total_sum) - return probs - - -class FeatureWriter(object): - """Writes InputFeature to TF example file.""" - - def __init__(self, filename, is_training): - self.filename = filename - self.is_training = is_training - self.num_features = 0 - self._writer = tf.python_io.TFRecordWriter(filename) - - def process_feature(self, feature): - """Write a InputFeature to the TFRecordWriter as a tf.train.Example.""" - self.num_features += 1 - - def create_int_feature(values): - feature = tf.train.Feature( - int64_list=tf.train.Int64List(value=list(values))) - return feature - - features = collections.OrderedDict() - features["unique_ids"] = create_int_feature([feature.unique_id]) - features["input_ids"] = create_int_feature(feature.input_ids) - features["input_mask"] = create_int_feature(feature.input_mask) - features["segment_ids"] = create_int_feature(feature.segment_ids) - - if self.is_training: - features["start_positions"] = create_int_feature( - [feature.start_position]) - features["end_positions"] = create_int_feature( - [feature.end_position]) - impossible = 0 - if feature.is_impossible: - impossible = 1 - features["is_impossible"] = create_int_feature([impossible]) - - tf_example = tf.train.Example( - features=tf.train.Features(feature=features)) - self._writer.write(tf_example.SerializeToString()) - - def close(self): - self._writer.close() - - -def validate_flags_or_throw(bert_config): - """Validate the input FLAGS or throw an exception.""" - tokenization.validate_case_matches_checkpoint(FLAGS_do_lower_case, - FLAGS_init_checkpoint_squad) - - # if not FLAGS_do_train and not FLAGS_do_predict: - # raise ValueError( - # "At least one of `do_train` or `do_predict` must be True.") - - # if FLAGS_do_train: - # if not FLAGS_train_file: - # raise ValueError( - # "If `do_train` is True, then `train_file` must be specified.") - # if FLAGS_do_predict: - # if not 
FLAGS_predict_file: - # raise ValueError( - # "If `do_predict` is True, then `predict_file` must be specified.") - - # if FLAGS_max_seq_length > bert_config.max_position_embeddings: - # raise ValueError( - # "Cannot use sequence length %d because the BERT model " - # "was only trained up to sequence length %d" % - # (FLAGS_max_seq_length, bert_config.max_position_embeddings)) - - # if FLAGS_max_seq_length <= FLAGS_max_query_length + 3: - # raise ValueError( - # "The max_seq_length (%d) must be greater than max_query_length " - # "(%d) + 3" % (FLAGS_max_seq_length, FLAGS_max_query_length)) diff --git a/scripts/api_squad_offline.py b/scripts/api_squad_offline.py deleted file mode 100644 index 8a05141..0000000 --- a/scripts/api_squad_offline.py +++ /dev/null @@ -1,264 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 - -# auther = 'liuzhiyong' -# date = 20201204 - - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from flask import Flask, abort, request, jsonify -from concurrent.futures import ThreadPoolExecutor - -import os -import random -import modeling -import tokenization -import tensorflow as tf -import sys - -from api_squad import FLAGS_max_seq_length -from api_squad import FLAGS_do_lower_case -from api_squad import FLAGS_use_tpu -from api_squad import FLAGS_tpu_name -from api_squad import FLAGS_tpu_zone -from api_squad import FLAGS_gcp_project -from api_squad import FLAGS_master -from api_squad import FLAGS_save_checkpoints_steps -from api_squad import FLAGS_iterations_per_loop -from api_squad import FLAGS_num_tpu_cores -from api_squad import FLAGS_warmup_proportion -from api_squad import FLAGS_doc_stride -from api_squad import model_fn_builder -from api_squad import FeatureWriter -from api_squad import convert_examples_to_features -from api_squad import input_fn_builder - -from global_setting import CUDA_VISIBLE_DEVICES -from global_setting import validate_flags_or_throw -from global_setting import read_squad_examples -from global_setting import FLAGS_bert_config_file, FLAGS_vocab_file, FLAGS_init_checkpoint_squad, questions - -os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" -os.environ["CUDA_VISIBLE_DEVICES"] = str(CUDA_VISIBLE_DEVICES) - -app = Flask(__name__) - - -def serving_input_fn(): - input_ids = tf.placeholder(tf.int32, [None, FLAGS_max_seq_length], name='input_ids') - unique_id = tf.placeholder(tf.int32, [None]) - input_mask = tf.placeholder(tf.int32, [None, FLAGS_max_seq_length], name='input_mask') - segment_ids = tf.placeholder(tf.int32, [None, FLAGS_max_seq_length], name='segment_ids') - input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({ - 'input_ids': input_ids, - 'input_mask': input_mask, - 'segment_ids': segment_ids, - 'unique_ids': unique_id, - })() - return input_fn - - -def main(FLAGS_output_dir, FLAGS_init_checkpoint_squad, FLAGS_export_dir, FLAGS_predict_file=None, FLAGS_train_file=None, FLAGS_do_predict=False, - FLAGS_do_train=False, FLAGS_train_batch_size=16, FLAGS_predict_batch_size=8, FLAGS_learning_rate=5e-5, FLAGS_num_train_epochs=3.0, - FLAGS_max_answer_length=100, FLAGS_max_query_length=64, FLAGS_version_2_with_negative=False): - tf.logging.set_verbosity(tf.logging.INFO) - - bert_config = modeling.BertConfig.from_json_file(FLAGS_bert_config_file) - - validate_flags_or_throw(bert_config) - - tf.gfile.MakeDirs(FLAGS_output_dir) - - tokenizer = tokenization.FullTokenizer( - vocab_file=FLAGS_vocab_file, do_lower_case=FLAGS_do_lower_case) - - tpu_cluster_resolver = None - if 
FLAGS_use_tpu and FLAGS_tpu_name:
-        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
-            FLAGS_tpu_name, zone=FLAGS_tpu_zone, project=FLAGS_gcp_project)
-    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
-    run_config = tf.contrib.tpu.RunConfig(
-        cluster=tpu_cluster_resolver,
-        master=FLAGS_master,
-        model_dir=FLAGS_output_dir,
-        save_checkpoints_steps=FLAGS_save_checkpoints_steps,
-        tpu_config=tf.contrib.tpu.TPUConfig(
-            iterations_per_loop=FLAGS_iterations_per_loop,
-            num_shards=FLAGS_num_tpu_cores,
-            per_host_input_for_training=is_per_host))
-
-    train_examples = None
-    num_train_steps = None
-    num_warmup_steps = None
-
-    if FLAGS_do_train:
-        train_examples = read_squad_examples(
-            input_file=FLAGS_train_file, is_training=True, questions=questions, FLAGS_version_2_with_negative=FLAGS_version_2_with_negative)
-        num_train_steps = int(
-            len(train_examples) / FLAGS_train_batch_size * FLAGS_num_train_epochs)
-        num_warmup_steps = int(num_train_steps * FLAGS_warmup_proportion)
-
-        # Pre-shuffle the input to avoid having to make a very large shuffle
-        # buffer in the `input_fn`.
-        rng = random.Random(12345)
-        rng.shuffle(train_examples)
-
-    model_fn = model_fn_builder(
-        bert_config=bert_config,
-        init_checkpoint=FLAGS_init_checkpoint_squad,
-        learning_rate=FLAGS_learning_rate,
-        num_train_steps=num_train_steps,
-        num_warmup_steps=num_warmup_steps,
-        use_tpu=FLAGS_use_tpu,
-        use_one_hot_embeddings=FLAGS_use_tpu)
-
-    # If TPU is not available, this will fall back to normal Estimator on CPU
-    # or GPU.
-    estimator = tf.contrib.tpu.TPUEstimator(
-        use_tpu=FLAGS_use_tpu,
-        model_fn=model_fn,
-        config=run_config,
-        train_batch_size=FLAGS_train_batch_size,
-        predict_batch_size=FLAGS_predict_batch_size)
-
-    if FLAGS_do_train:
-        # We write to a temporary file to avoid storing very large constant tensors
-        # in memory.
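-        # The round trip below keeps memory flat: features are serialized once
-        # into the train.tf_record file by FeatureWriter, and input_fn_builder
-        # later streams them back from disk during estimator.train().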
-        train_writer = FeatureWriter(
-            filename=os.path.join(FLAGS_output_dir, "train.tf_record"),
-            is_training=True)
-        convert_examples_to_features(
-            examples=train_examples,
-            tokenizer=tokenizer,
-            max_seq_length=FLAGS_max_seq_length,
-            doc_stride=FLAGS_doc_stride,
-            max_query_length=FLAGS_max_query_length,
-            is_training=True,
-            output_fn=train_writer.process_feature)
-        train_writer.close()
-
-        tf.logging.info("***** Running training *****")
-        tf.logging.info("  Num orig examples = %d", len(train_examples))
-        tf.logging.info("  Num split examples = %d", train_writer.num_features)
-        tf.logging.info("  Batch size = %d", FLAGS_train_batch_size)
-        tf.logging.info("  Num steps = %d", num_train_steps)
-        del train_examples
-
-        train_input_fn = input_fn_builder(
-            input_file=train_writer.filename,
-            seq_length=FLAGS_max_seq_length,
-            is_training=True,
-            drop_remainder=True)
-        estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
-    estimator._export_to_tpu = False
-    estimator.export_savedmodel(FLAGS_export_dir, serving_input_fn)
-    return 'success'
-
-
-class AI2Flask:
-
-    def __init__(self, port=5000, workers=4):
-        self.app = app
-        self.port = port
-        p = ThreadPoolExecutor(max_workers=workers)
-        threads_mapping = {}
-
-        def check_threads():
-            flag = False
-            pop_keys = set()
-            if len(threads_mapping) >= workers:
-                for k, v in threads_mapping.items():
-                    if v.running():
-                        flag = True
-                    else:
-                        pop_keys.add(k)
-
-            for k in pop_keys:
-                threads_mapping.pop(k)
-
-            return flag
-
-        @app.route('/api/offline/train', methods=['POST'])
-        def text_analyse():
-            if not request.json or 'task_id' not in request.json:
-                abort(400)
-            if check_threads():
-                return jsonify({"Des": "Task list is full. Cannot submit a new task!", "Result": "Failed to submit the training task", "Status": "ERROR"})
-
-            else:
-                # Optional hyper-parameters fall back to their defaults when
-                # the request body omits them.
-                FLAGS_train_batch_size = request.json.get('FLAGS_train_batch_size', 16)
-                FLAGS_learning_rate = request.json.get('FLAGS_learning_rate', 5e-5)
-                FLAGS_num_train_epochs = request.json.get('FLAGS_num_train_epochs', 3.0)
-                FLAGS_max_answer_length = request.json.get('FLAGS_max_answer_length', 100)
-                FLAGS_max_query_length = request.json.get('FLAGS_max_query_length', 64)
-                FLAGS_version_2_with_negative = request.json.get('FLAGS_version_2_with_negative', True)
-
-                try:
-                    FLAGS_predict_file = None
-                    FLAGS_predict_batch_size = 8
-                    FLAGS_do_predict = False
-                    FLAGS_do_train = True
-                    FLAGS_output_dir = request.json['FLAGS_output_dir']
-                    FLAGS_train_file = request.json['FLAGS_train_file']
-                    FLAGS_export_dir = request.json['FLAGS_export_dir']
-                    task_id = request.json['task_id']
-
-                    task = p.submit(main, FLAGS_output_dir, FLAGS_init_checkpoint_squad, FLAGS_export_dir, FLAGS_predict_file, FLAGS_train_file, FLAGS_do_predict,
-                                    FLAGS_do_train, FLAGS_train_batch_size, FLAGS_predict_batch_size, FLAGS_learning_rate, FLAGS_num_train_epochs,
-                                    FLAGS_max_answer_length, FLAGS_max_query_length, FLAGS_version_2_with_negative)
-                    threads_mapping[task_id] = task
-
-                    return jsonify({"message": "Task submitted successfully", "status": "0"})
-
-                except KeyError as e:
-                    return jsonify({"Des": 'KeyError: {}'.format(str(e)), "Result": 'None', "Status": "Error"})
-                except Exception as e:
-                    return jsonify({"Des": str(e), "Result": 'None', "Status": "Error"})
-
-        @app.route('/api/offline/status', 
methods=['POST'])
-        def todo_status():
-            task_id = request.json['task_id']
-            task = threads_mapping.get(task_id, None)
-            try:
-                if task is None:
-                    return jsonify({'Des': 'The task was not found', 'Status': 'ERROR'})
-                else:
-                    if task.done():
-                        print(task.result())
-                        if task.result() == 'success':
-                            return jsonify({'Des': 'DONE', 'Status': 'OK'})
-                        else:
-                            return jsonify({'Des': 'Program execution error. Please check the execution log', 'Status': 'ERROR'})
-
-                    else:
-                        return jsonify({'Des': 'RUNNING', 'Status': 'OK'})
-            except Exception as e:
-                return jsonify({'Des': str(e), 'Status': 'ERROR'})
-
-    def start(self):
-        self.app.run(host="0.0.0.0", port=self.port, threaded=True)
-
-
-if __name__ == '__main__':
-    port = sys.argv[1]
-    AI2Flask(port=port).start()
diff --git a/scripts/api_squad_online.py b/scripts/api_squad_online.py
deleted file mode 100644
index abe3d5f..0000000
--- a/scripts/api_squad_online.py
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# author = 'liuzhiyong'
-# date = 20201204
-
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-import json
-import sys
-from flask import Flask, abort, request, jsonify
-
-import os
-from global_setting import questions, tokenizer_ch, CUDA_VISIBLE_DEVICES
-from create_squad_features import get_squad_feature_result
-
-
-app = Flask(__name__)
-
-
-class AI2Flask:
-
-    def __init__(self, port=5000, workers=4):
-        self.app = app
-        self.port = port
-
-        @app.route('/api/online/predict', methods=['POST'])
-        def text_analyse():
-            if not request.json:
-                abort(400)
-
-            else:
-                try:
-                    title = request.json.get('title', 'Not available')
-                    text_origin = request.json['text']
-
-                    # Truncate long inputs; slicing is a no-op for short text.
-                    text = text_origin[:800]
-
-                    result = {}
-                    for ques in questions:
-                        tmp = get_squad_feature_result(title=title, text=text, tokenizer=tokenizer_ch, question=[ques], url='http://localhost:8502/v1/models/predict:predict')
-                        result[ques] = dict(tmp)[ques]
-
-                    print('finished!!')
-                    return json.dumps(result)
-
-                except KeyError as e:
-                    return jsonify({"Des": 'KeyError: {}'.format(str(e)), "Result": 'None', "Status": "Error"})
-                except Exception as e:
-                    return jsonify({"Des": str(e), "Result": 'None', "Status": "Error"})
-
-        @app.route('/api/online/load', methods=['POST'])
-        def load_model():
-            if not request.json:
-                abort(400)
-            else:
-                try:
-                    path = request.json['path']
-                    flag = os.system('./load_model.sh ' + path + ' ' + CUDA_VISIBLE_DEVICES)
-                    if flag == 0:
-                        return jsonify({"Des": "Model loaded successfully!", "Status": "OK"})
-                    else:
-                        return jsonify({"Des": "Model load failed, check the logs!", "Status": "Error"})
-                except Exception as e:
-                    return jsonify({"Des": str(e), "Status": "Error"})
-
-    def start(self):
-        self.app.run(host="0.0.0.0", port=self.port, threaded=True)
-
-
-if __name__ == '__main__':
-    port = sys.argv[1]
-    AI2Flask(port=port).start()
diff --git a/scripts/create_squad_features.py b/scripts/create_squad_features.py
deleted file mode 100644
index ce274e0..0000000
--- a/scripts/create_squad_features.py
+++ /dev/null
@@ -1,721 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# author = 'liuzhiyong'
-# date = 20201204
-
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-import json
-
-import collections
-import math
-import tokenization
-import six
-import tensorflow as tf
-import requests
-
-from global_setting import 
_improve_answer_span - -version_2_with_negative = True - - -def get_squad_feature_result(title, text, tokenizer, question, url): - - def make_json(title, text, question): - res = {} - res['data'] = [] - data_inside = {} - - data_inside['title'] = title - data_inside['paragraphs'] = [] - paragraphs_inside = {} - paragraphs_inside['context'] = text - paragraphs_inside['qas'] = [] - for ques in question: - qas_inside = {} - qas_inside['answers'] = [] - - answer_inside = {} - - qas_inside['id'] = ques - qas_inside['question'] = ques - qas_inside['answers'].append(answer_inside.copy()) - paragraphs_inside['qas'].append(qas_inside.copy()) - data_inside['paragraphs'].append(paragraphs_inside.copy()) - - res['data'].append(data_inside.copy()) - return json.dumps(res) - - def _compute_softmax(scores): - """Compute softmax probability over raw logits.""" - if not scores: - return [] - - max_score = None - for score in scores: - if max_score is None or score > max_score: - max_score = score - - exp_scores = [] - total_sum = 0.0 - for score in scores: - x = math.exp(score - max_score) - exp_scores.append(x) - total_sum += x - - probs = [] - for score in exp_scores: - probs.append(score / total_sum) - return probs - - def get_final_text(pred_text, orig_text, do_lower_case): - - def _strip_spaces(text): - ns_chars = [] - ns_to_s_map = collections.OrderedDict() - for (i, c) in enumerate(text): - if c == " ": - continue - ns_to_s_map[len(ns_chars)] = i - ns_chars.append(c) - ns_text = "".join(ns_chars) - return (ns_text, ns_to_s_map) - - # We first tokenize `orig_text`, strip whitespace from the result - # and `pred_text`, and check if they are the same length. If they are - # NOT the same length, the heuristic has failed. If they are the same - # length, we assume the characters are one-to-one aligned. - tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case) - - tok_text = " ".join(tokenizer.tokenize(orig_text)) - - start_position = tok_text.find(pred_text) - if start_position == -1: - if 0: - tf.logging.info( - "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) - return orig_text - end_position = start_position + len(pred_text) - 1 - - (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) - (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) - - if len(orig_ns_text) != len(tok_ns_text): - if 0: - tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'", - orig_ns_text, tok_ns_text) - return orig_text - - # We then project the characters in `pred_text` back to `orig_text` using - # the character-to-character alignment. 
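-        # Worked example (illustrative values, not from this repo): with
-        # orig_text = "John  Smith" (two spaces), do_lower_case=True and
-        # pred_text = "smith", both strings strip to the same length
-        # ("JohnSmith" / "johnsmith"); tok positions 5..9 map to stripped
-        # positions 4..8, which project back to orig positions 6..10, so
-        # "Smith" is returned with its original casing and spacing.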
-        tok_s_to_ns_map = {}
-        for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
-            tok_s_to_ns_map[tok_index] = i
-
-        orig_start_position = None
-        if start_position in tok_s_to_ns_map:
-            ns_start_position = tok_s_to_ns_map[start_position]
-            if ns_start_position in orig_ns_to_s_map:
-                orig_start_position = orig_ns_to_s_map[ns_start_position]
-
-        if orig_start_position is None:
-            if 0:
-                tf.logging.info("Couldn't map start position")
-            return orig_text
-
-        orig_end_position = None
-        if end_position in tok_s_to_ns_map:
-            ns_end_position = tok_s_to_ns_map[end_position]
-            if ns_end_position in orig_ns_to_s_map:
-                orig_end_position = orig_ns_to_s_map[ns_end_position]
-
-        if orig_end_position is None:
-            if 0:
-                tf.logging.info("Couldn't map end position")
-            return orig_text
-
-        output_text = orig_text[orig_start_position:(orig_end_position + 1)]
-        return output_text
-
-    def _get_best_indexes(logits, n_best_size):
-
-        index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
-
-        best_indexes = []
-        for i in range(len(index_and_score)):
-            if i >= n_best_size:
-                break
-            best_indexes.append(index_and_score[i][0])
-        return best_indexes
-
-    RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"])
-
-    def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case):
-        """Write final predictions to the json file and log-odds of null if needed."""
-
-        example_index_to_features = collections.defaultdict(list)
-        for feature in all_features:
-            example_index_to_features[feature.example_index].append(feature)
-
-        unique_id_to_result = {}
-        for result in all_results:
-            unique_id_to_result[result.unique_id] = result
-
-        _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
-            "PrelimPrediction",
-            ["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
-
-        all_predictions = collections.OrderedDict()
-        all_nbest_json = collections.OrderedDict()
-        scores_diff_json = collections.OrderedDict()
-
-        for (example_index, example) in enumerate(all_examples):
-            features = example_index_to_features[example_index]
-
-            prelim_predictions = []
-            # keep track of the minimum score of null start+end of position 0
-            score_null = 1000000  # large and positive
-            min_null_feature_index = 0  # the paragraph slice with min null score
-            null_start_logit = 0  # the start logit at the slice with min null score
-            null_end_logit = 0  # the end logit at the slice with min null score
-            for (feature_index, feature) in enumerate(features):
-                result = unique_id_to_result[feature.unique_id]
-                start_indexes = _get_best_indexes(result.start_logits, n_best_size)
-                end_indexes = _get_best_indexes(result.end_logits, n_best_size)
-                # if we could have irrelevant answers, get the min score of irrelevant
-                if version_2_with_negative:
-                    feature_null_score = result.start_logits[0] + result.end_logits[0]
-                    if feature_null_score < score_null:
-                        score_null = feature_null_score
-                        min_null_feature_index = feature_index
-                        null_start_logit = result.start_logits[0]
-                        null_end_logit = result.end_logits[0]
-
-                for start_index in start_indexes:
-                    for end_index in end_indexes:
-                        # We could hypothetically create invalid predictions, e.g., predict
-                        # that the start of the span is in the question. We throw out all
-                        # invalid predictions.
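-                        # Each check below removes one failure mode: indexes
-                        # into padding, tokens with no mapping back to the
-                        # original document (the question and special tokens),
-                        # spans taken from a window that is not the token's
-                        # maximal context, reversed spans, and spans longer
-                        # than max_answer_length.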
-                        if start_index >= len(feature.tokens):
-                            continue
-                        if end_index >= len(feature.tokens):
-                            continue
-                        if start_index not in feature.token_to_orig_map:
-                            continue
-                        if end_index not in feature.token_to_orig_map:
-                            continue
-                        if not feature.token_is_max_context.get(start_index, False):
-                            continue
-                        if end_index < start_index:
-                            continue
-                        length = end_index - start_index + 1
-                        if length > max_answer_length:
-                            continue
-                        prelim_predictions.append(
-                            _PrelimPrediction(
-                                feature_index=feature_index,
-                                start_index=start_index,
-                                end_index=end_index,
-                                start_logit=result.start_logits[start_index],
-                                end_logit=result.end_logits[end_index]))
-
-            if version_2_with_negative:
-                prelim_predictions.append(
-                    _PrelimPrediction(
-                        feature_index=min_null_feature_index,
-                        start_index=0,
-                        end_index=0,
-                        start_logit=null_start_logit,
-                        end_logit=null_end_logit))
-            prelim_predictions = sorted(
-                prelim_predictions,
-                key=lambda x: (x.start_logit + x.end_logit),
-                reverse=True)
-
-            _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
-                "NbestPrediction", ["text", "start_logit", "end_logit"])
-
-            seen_predictions = {}
-            nbest = []
-            for pred in prelim_predictions:
-                if len(nbest) >= n_best_size:
-                    break
-                feature = features[pred.feature_index]
-                if pred.start_index > 0:  # this is a non-null prediction
-                    tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
-                    orig_doc_start = feature.token_to_orig_map[pred.start_index]
-                    orig_doc_end = feature.token_to_orig_map[pred.end_index]
-                    orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
-                    tok_text = " ".join(tok_tokens)
-
-                    # De-tokenize WordPieces that have been split off.
-                    tok_text = tok_text.replace(" ##", "")
-                    tok_text = tok_text.replace("##", "")
-
-                    # Clean whitespace
-                    tok_text = tok_text.strip()
-                    tok_text = " ".join(tok_text.split())
-                    orig_text = " ".join(orig_tokens)
-
-                    final_text = get_final_text(tok_text, orig_text, do_lower_case)
-                    if final_text in seen_predictions:
-                        continue
-
-                    seen_predictions[final_text] = True
-                else:
-                    final_text = ""
-                    seen_predictions[final_text] = True
-
-                nbest.append(
-                    _NbestPrediction(
-                        text=final_text,
-                        start_logit=pred.start_logit,
-                        end_logit=pred.end_logit))
-
-            # if we didn't include the empty option in the n-best, include it
-            if version_2_with_negative:
-                if "" not in seen_predictions:
-                    nbest.append(
-                        _NbestPrediction(
-                            text="", start_logit=null_start_logit,
-                            end_logit=null_end_logit))
-
-            # In very rare edge cases we could have no valid predictions. So we
-            # just create a nonce prediction in this case to avoid failure.
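-            # An empty nbest can only occur when version_2_with_negative is
-            # False and every candidate span was filtered out above; the dummy
-            # entry keeps the softmax and JSON assembly below from failing.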
- if not nbest: - nbest.append( - _NbestPrediction(text="", start_logit=0.0, end_logit=0.0)) - - assert len(nbest) >= 1 - - total_scores = [] - best_non_null_entry = None - for entry in nbest: - total_scores.append(entry.start_logit + entry.end_logit) - if not best_non_null_entry: - if entry.text: - best_non_null_entry = entry - - probs = _compute_softmax(total_scores) - - nbest_json = [] - for (i, entry) in enumerate(nbest): - output = collections.OrderedDict() - output["text"] = entry.text - output["probability"] = probs[i] - output["start_logit"] = entry.start_logit - output["end_logit"] = entry.end_logit - nbest_json.append(output) - - assert len(nbest_json) >= 1 - - if not version_2_with_negative: - all_predictions[example.qas_id] = nbest_json[0]["text"] - else: - # predict "" iff the null score - the score of best non-null > threshold - score_diff = score_null - best_non_null_entry.start_logit - ( - best_non_null_entry.end_logit) - scores_diff_json[example.qas_id] = score_diff - if score_diff > 0: - all_predictions[example.qas_id] = "" - else: - all_predictions[example.qas_id] = best_non_null_entry.text - - all_nbest_json[example.qas_id] = nbest_json - return all_predictions - - def create_int_feature(values): - - feature = tf.train.Feature( - int64_list=tf.train.Int64List(value=list(values))) - return feature - - class InputFeatures(object): - """A single set of features of data.""" - - def __init__(self, - unique_id, - example_index, - doc_span_index, - tokens, - token_to_orig_map, - token_is_max_context, - input_ids, - input_mask, - segment_ids, - start_position=None, - end_position=None, - is_impossible=None): - self.unique_id = unique_id - self.example_index = example_index - self.doc_span_index = doc_span_index - self.tokens = tokens - self.token_to_orig_map = token_to_orig_map - self.token_is_max_context = token_is_max_context - self.input_ids = input_ids - self.input_mask = input_mask - self.segment_ids = segment_ids - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - - def _check_is_max_context(doc_spans, cur_span_index, position): - """Check if this is the 'max context' doc span for the token.""" - - # Because of the sliding window approach taken to scoring documents, a single - # token can appear in multiple documents. E.g. - # Doc: the man went to the store and bought a gallon of milk - # Span A: the man went to the - # Span B: to the store and bought - # Span C: and bought a gallon of - # ... - # - # Now the word 'bought' will have two scores from spans B and C. We only - # want to consider the score with "maximum context", which we define as - # the *minimum* of its left and right context (the *sum* of left and - # right context will always be the same, of course). - # - # In the example the maximum context for 'bought' would be span C since - # it has 1 left context and 3 right context, while span B has 4 left context - # and 0 right context. 
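-        # Applying score = min(left, right) + 0.01 * length to the example
-        # above: 'bought' in Span B ("to the store and bought", length 5)
-        # scores min(4, 0) + 0.05 = 0.05, while in Span C ("and bought a
-        # gallon of") it scores min(1, 3) + 0.05 = 1.05, so Span C wins.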
- best_score = None - best_span_index = None - for (span_index, doc_span) in enumerate(doc_spans): - end = doc_span.start + doc_span.length - 1 - if position < doc_span.start: - continue - if position > end: - continue - num_left_context = position - doc_span.start - num_right_context = end - position - score = min(num_left_context, num_right_context) + 0.01 * doc_span.length - if best_score is None or score > best_score: - best_score = score - best_span_index = span_index - - return cur_span_index == best_span_index - - def convert_examples_to_features(examples, tokenizer, max_seq_length, - doc_stride, max_query_length, is_training): - """Loads a data file into a list of `InputBatch`s.""" - - unique_id = 1000000000 - result = [] - - for (example_index, example) in enumerate(examples): - query_tokens = tokenizer.tokenize(example.question_text) - - if len(query_tokens) > max_query_length: - query_tokens = query_tokens[0:max_query_length] - - tok_to_orig_index = [] - orig_to_tok_index = [] - all_doc_tokens = [] - for (i, token) in enumerate(example.doc_tokens): - orig_to_tok_index.append(len(all_doc_tokens)) - sub_tokens = tokenizer.tokenize(token) - for sub_token in sub_tokens: - tok_to_orig_index.append(i) - all_doc_tokens.append(sub_token) - - tok_start_position = None - tok_end_position = None - if is_training and example.is_impossible: - tok_start_position = -1 - tok_end_position = -1 - if is_training and not example.is_impossible: - tok_start_position = orig_to_tok_index[example.start_position] - if example.end_position < len(example.doc_tokens) - 1: - tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 - else: - tok_end_position = len(all_doc_tokens) - 1 - (tok_start_position, tok_end_position) = _improve_answer_span( - all_doc_tokens, tok_start_position, tok_end_position, tokenizer, - example.orig_answer_text) - - # The -3 accounts for [CLS], [SEP] and [SEP] - max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 - - # We can have documents that are longer than the maximum sequence length. - # To deal with this we do a sliding window approach, where we take chunks - # of the up to our max length with a stride of `doc_stride`. - _DocSpan = collections.namedtuple( # pylint: disable=invalid-name - "DocSpan", ["start", "length"]) - doc_spans = [] - start_offset = 0 - while start_offset < len(all_doc_tokens): - length = len(all_doc_tokens) - start_offset - if length > max_tokens_for_doc: - length = max_tokens_for_doc - doc_spans.append(_DocSpan(start=start_offset, length=length)) - if start_offset + length == len(all_doc_tokens): - break - start_offset += min(length, doc_stride) - - for (doc_span_index, doc_span) in enumerate(doc_spans): - tokens = [] - token_to_orig_map = {} - token_is_max_context = {} - segment_ids = [] - tokens.append("[CLS]") - segment_ids.append(0) - for token in query_tokens: - tokens.append(token) - segment_ids.append(0) - tokens.append("[SEP]") - segment_ids.append(0) - - for i in range(doc_span.length): - split_token_index = doc_span.start + i - token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] - - is_max_context = _check_is_max_context(doc_spans, doc_span_index, - split_token_index) - token_is_max_context[len(tokens)] = is_max_context - tokens.append(all_doc_tokens[split_token_index]) - segment_ids.append(1) - tokens.append("[SEP]") - segment_ids.append(1) - - input_ids = tokenizer.convert_tokens_to_ids(tokens) - - # The mask has 1 for real tokens and 0 for padding tokens. Only real - # tokens are attended to. 
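-                # Illustrative shape (numbers not from this repo): with
-                # max_seq_length = 8 and five real tokens, the mask starts as
-                # [1, 1, 1, 1, 1] and the padding loop below extends ids, mask
-                # and segment_ids to length 8, e.g. [1, 1, 1, 1, 1, 0, 0, 0].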
- input_mask = [1] * len(input_ids) - - # Zero-pad up to the sequence length. - while len(input_ids) < max_seq_length: - input_ids.append(0) - input_mask.append(0) - segment_ids.append(0) - - assert len(input_ids) == max_seq_length - assert len(input_mask) == max_seq_length - assert len(segment_ids) == max_seq_length - - start_position = None - end_position = None - if is_training and not example.is_impossible: - # For training, if our document chunk does not contain an annotation - # we throw it out, since there is nothing to predict. - doc_start = doc_span.start - doc_end = doc_span.start + doc_span.length - 1 - out_of_span = False - if not (tok_start_position >= doc_start and tok_end_position <= doc_end): - out_of_span = True - if out_of_span: - start_position = 0 - end_position = 0 - else: - doc_offset = len(query_tokens) + 2 - start_position = tok_start_position - doc_start + doc_offset - end_position = tok_end_position - doc_start + doc_offset - - if is_training and example.is_impossible: - start_position = 0 - end_position = 0 - - if example_index < 20: - tf.logging.info("*** Example ***") - tf.logging.info("unique_id: %s" % (unique_id)) - tf.logging.info("example_index: %s" % (example_index)) - tf.logging.info("doc_span_index: %s" % (doc_span_index)) - tf.logging.info("tokens: %s" % " ".join( - [tokenization.printable_text(x) for x in tokens])) - tf.logging.info("token_to_orig_map: %s" % " ".join( - ["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)])) - tf.logging.info("token_is_max_context: %s" % " ".join([ - "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context) - ])) - tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) - tf.logging.info( - "input_mask: %s" % " ".join([str(x) for x in input_mask])) - tf.logging.info( - "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) - if is_training and example.is_impossible: - tf.logging.info("impossible example") - if is_training and not example.is_impossible: - answer_text = " ".join(tokens[start_position:(end_position + 1)]) - tf.logging.info("start_position: %d" % (start_position)) - tf.logging.info("end_position: %d" % (end_position)) - tf.logging.info( - "answer: %s" % (tokenization.printable_text(answer_text))) - - feature = InputFeatures( - unique_id=unique_id, - example_index=example_index, - doc_span_index=doc_span_index, - tokens=tokens, - token_to_orig_map=token_to_orig_map, - token_is_max_context=token_is_max_context, - input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - start_position=start_position, - end_position=end_position, - is_impossible=example.is_impossible) - - # Run callback - - result.append(feature) - unique_id += 1 - return result - - class SquadExample(object): - - def __init__(self, - qas_id, - question_text, - doc_tokens, - orig_answer_text=None, - start_position=None, - end_position=None, - is_impossible=False): - self.qas_id = qas_id - self.question_text = question_text - self.doc_tokens = doc_tokens - self.orig_answer_text = orig_answer_text - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - - def __str__(self): - return self.__repr__() - - def __repr__(self): - s = "" - s += "qas_id: %s" % (tokenization.printable_text(self.qas_id)) - s += ", question_text: %s" % ( - tokenization.printable_text(self.question_text)) - s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) - if self.start_position: - s += ", start_position: %d" % (self.start_position) - if 
self.end_position:
-                s += ", end_position: %d" % (self.end_position)
-            if self.is_impossible:
-                s += ", is_impossible: %r" % (self.is_impossible)
-            return s
-
-    def read_squad_examples(input_file, is_training):
-        """Read a SQuAD json file into a list of SquadExample."""
-
-        input_data = json.loads(input_file)["data"]
-
-        def is_whitespace(c):
-            if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
-                return True
-            return False
-
-        examples = []
-        for entry in input_data:
-            for paragraph in entry["paragraphs"]:
-                paragraph_text = paragraph["context"]
-                doc_tokens = []
-                char_to_word_offset = []
-                prev_is_whitespace = True
-                for c in paragraph_text:
-                    if is_whitespace(c):
-                        prev_is_whitespace = True
-                    else:
-                        if prev_is_whitespace:
-                            doc_tokens.append(c)
-                        else:
-                            doc_tokens[-1] += c
-                        prev_is_whitespace = False
-                    char_to_word_offset.append(len(doc_tokens) - 1)
-
-                for qa in paragraph["qas"]:
-                    qas_id = qa["id"]
-                    question_text = qa["question"]
-                    start_position = None
-                    end_position = None
-                    orig_answer_text = None
-                    is_impossible = False
-                    if is_training:
-
-                        if (len(qa["answers"]) != 1) and (not is_impossible):
-                            raise ValueError(
-                                "For training, each question should have exactly 1 answer.")
-                        if not is_impossible:
-                            answer = qa["answers"][0]
-                            orig_answer_text = answer["text"]
-                            answer_offset = answer["answer_start"]
-                            answer_length = len(orig_answer_text)
-                            start_position = char_to_word_offset[answer_offset]
-                            end_position = char_to_word_offset[answer_offset + answer_length - 1]
-                            # Only add answers where the text can be exactly recovered from the
-                            # document. If this CAN'T happen it's likely due to weird Unicode
-                            # stuff so we will just skip the example.
-                            #
-                            # Note that this means for training mode, every example is NOT
-                            # guaranteed to be preserved.
-                            actual_text = " ".join(
-                                doc_tokens[start_position:(end_position + 1)])
-                            cleaned_answer_text = " ".join(
-                                tokenization.whitespace_tokenize(orig_answer_text))
-                            if actual_text.find(cleaned_answer_text) == -1:
-                                tf.logging.warning("Could not find answer: '%s' vs. 
'%s'", - actual_text, cleaned_answer_text) - continue - else: - start_position = -1 - end_position = -1 - orig_answer_text = "" - - example = SquadExample( - qas_id=qas_id, - question_text=question_text, - doc_tokens=doc_tokens, - orig_answer_text=orig_answer_text, - start_position=start_position, - end_position=end_position, - is_impossible=is_impossible) - examples.append(example) - - return examples - - def get_result(title, text, question, url): - - data = make_json(title, text, question) - - examples = read_squad_examples(data, False) - - predict_files = convert_examples_to_features( - examples=examples, - tokenizer=tokenizer, - max_seq_length=512, - doc_stride=128, - max_query_length=100, - is_training=False, - ) - - headers = {"content-type": "application/json"} - all_results = [] - for predict_file in predict_files: - features = {} - features["unique_ids"] = predict_file.unique_id - features["input_mask"] = predict_file.input_mask - features["segment_ids"] = predict_file.segment_ids - features["input_ids"] = predict_file.input_ids - data_list = [] - data_list.append(features) - - data = json.dumps({"instances": data_list}) - - json_response = requests.post(url, data=data, headers=headers) - - x = json.loads(json_response.text) - - all_results.append( - RawResult( - unique_id=predict_file.unique_id, - start_logits=x['predictions'][0]['start_logits'], - end_logits=x['predictions'][0]['end_logits'])) - - result = write_predictions(examples, predict_files, all_results, 20, 64, True) - return result - - return get_result(title, text, question, url) diff --git a/scripts/global_setting.py b/scripts/global_setting.py deleted file mode 100644 index 51dfec1..0000000 --- a/scripts/global_setting.py +++ /dev/null @@ -1,31 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -# import collections -# import math -# import modeling -# import optimization -import tokenization -# import six -# import tensorflow as tf -# import os - -# Global variables - -# GPU number, default: -1, means not used -CUDA_VISIBLE_DEVICES = "2" - -# Questions to be trained/predicted -questions = ['Communication Service Name', 'Max Number of UEs', 'Data Rate Downlink', 'Latency', 'Data Rate Uplink', 'Resource Sharing Level', 'Mobility', 'Area'] - -# Configuration file -FLAGS_bert_config_file = '/home/run/uncased_L-12_H-768_A-12/bert_config.json' -FLAGS_vocab_file = '/home/run/uncased_L-12_H-768_A-12/vocab.txt' -FLAGS_init_checkpoint_squad = '/home/run/uncased_L-12_H-768_A-12/bert_model.ckpt' - -max_seq_length = 512 - - -tokenizer_ch = tokenization.FullTokenizer(vocab_file=FLAGS_vocab_file, do_lower_case=True) diff --git a/scripts/load_model.sh b/scripts/load_model.sh deleted file mode 100755 index 1961f6a..0000000 --- a/scripts/load_model.sh +++ /dev/null @@ -1,6 +0,0 @@ -path=$1 -use_gpu=$2 -export CUDA_VISIBLE_DEVICES=$use_gpu -netstat -nap | grep 8502 | awk 'NR==1{printf $7}' | sed 's/\([0-9]*\).*/\1/g' | xargs kill -9 -sleep 5 -nohup tensorflow_model_server --port=8500 --rest_api_port=8502 --model_name=predict --model_base_path=$path > server.log 2>&1 & \ No newline at end of file diff --git a/standalone/pom.xml b/standalone/pom.xml new file mode 100644 index 0000000..07377ca --- /dev/null +++ b/standalone/pom.xml @@ -0,0 +1,173 @@ + + + + 4.0.0 + + org.onap.usecase-ui.nlp + usecase-ui-nlp-parent + 1.0.0-SNAPSHOT + + + usecase-ui-nlp-standalone + pom + usecase-ui-nlp-standalone + distribute binary files and docker image for usecase-ui nlp + + + 
3.0.1 + UTF-8 + UTF-8 + yyyyMMdd'T'HHmmss'Z' + target/assembly/linux64 + target/version/${usecaseui.version} + + + + usecse-ui-nlp + + + org.apache.maven.plugins + maven-resources-plugin + + + copy-resources-linux64 + process-resources + + copy-resources + + + ${dockeroutput} + + + src/main/assembly/ + false + + **/* + + + dockerfile + + + + true + + + + copy-dockerfile + process-resources + + copy-resources + + + ${dockeroutput} + + + src/main/assembly/ + false + + dockerfile + + + + true + + + + copy-source + process-resources + + copy-resources + + + ${dockeroutput}/scripts + + + ../nlp/scripts/ + false + + **/* + + + + true + + + + + + + + + + docker + + + + io.fabric8 + docker-maven-plugin + 0.16.5 + false + + + + onap/usecase-ui-nlp + + try + ${basedir}/${dockeroutput} + ${basedir}/${dockeroutput}/dockerfile + + ${usecaseui.version}-SNAPSHOT-latest + ${usecaseui.version}-STAGING-latest + ${usecaseui.version}-STAGING-${maven.build.timestamp} + + + + + + + + generate-images + package + + build + + + + push-images + deploy + + build + push + + + + + + + + + + + + + org.onap.usecase-ui.nlp + usecase-ui-nlp + ${project.version} + + + + diff --git a/standalone/src/main/assembly/LICENSE b/standalone/src/main/assembly/LICENSE new file mode 100644 index 0000000..5c767c3 --- /dev/null +++ b/standalone/src/main/assembly/LICENSE @@ -0,0 +1,473 @@ +THIS LICENSE FILE CONTAINS THE LICENSE APPLICABLE DEPENDING ON THE TYPE OF CONTRIBUTIONS. + +APACHE LICENSE 2 IS APPLICABLE FOR SOURCE CODE, CREATIVE COMMONS ATTRIBUTION 4.0 INTERNATIONAL FOR DOCUMENTATION + ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
+ +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of this License; and +You must cause any modified files to carry prominent notices stating that You changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. +You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +Attribution 4.0 International + +https://creativecommons.org/licenses/by/4.0/legalcode + +--------------------------------------------------------------------------------------- + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. 
More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +--------------------------------------------------------------------------------------- + +Creative Commons Attribution 4.0 International Public License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution 4.0 International Public License ("Public License"). To the +extent this Public License may be interpreted as a contract, You are +granted the Licensed Rights in consideration of Your acceptance of +these terms and conditions, and the Licensor grants You such rights in +consideration of benefits the Licensor receives from making the +Licensed Material available under these terms and conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + d. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + e. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + f. 
Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + g. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + h. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + i. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + j. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + k. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. 
Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + 4. If You Share Adapted Material You produce, the Adapter's + License You apply must not prevent recipients of the Adapted + Material from complying with this Public License. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material; and + + c. 
You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. 
For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + +--------------------------------------------------------------------------------------- + +Creative Commons is not a party to its public +licenses. Notwithstanding, Creative Commons may elect to apply one of +its public licenses to material it publishes and in those instances +will be considered the "Licensor." The text of the Creative Commons +public licenses is dedicated to the public domain under the CC0 Public +Domain Dedication. Except for the limited purpose of indicating that +material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the +public licenses. + +Creative Commons may be contacted at creativecommons.org. 
\ No newline at end of file
diff --git a/standalone/src/main/assembly/dockerfile b/standalone/src/main/assembly/dockerfile
new file mode 100644
index 0000000..d8797d3
--- /dev/null
+++ b/standalone/src/main/assembly/dockerfile
@@ -0,0 +1,21 @@
+FROM tensorflow/serving:latest
+
+WORKDIR /home/run/
+
+RUN DEBIAN_FRONTEND=noninteractive apt-get update
+RUN DEBIAN_FRONTEND=noninteractive apt-get install wget -y
+RUN DEBIAN_FRONTEND=noninteractive apt-get install unzip -y
+
+RUN DEBIAN_FRONTEND=noninteractive apt-get install python3-pip python3-dev build-essential -y
+RUN ln -s /usr/bin/python3 /usr/bin/python
+RUN ln -s /usr/bin/pip3 /usr/bin/pip
+
+COPY requirements /home/run/requirements
+RUN DEBIAN_FRONTEND=noninteractive pip install -r requirements
+
+COPY scripts /home/run/scripts
+
+COPY run.sh /home/run/run.sh
+
+ENTRYPOINT /home/run/run.sh
+
diff --git a/standalone/src/main/assembly/requirements b/standalone/src/main/assembly/requirements
new file mode 100644
index 0000000..f9a91a1
--- /dev/null
+++ b/standalone/src/main/assembly/requirements
@@ -0,0 +1,29 @@
+entrypoints==0.3
+Flask==1.1.1
+future==0.17.1
+joblib==0.13.2
+json5==0.8.4
+jsonschema==3.0.1
+Keras-Applications==1.0.8
+Keras-Preprocessing==1.1.0
+lxml==4.3.4
+matplotlib==3.1.0
+mkl-fft==1.0.12
+mkl-random==1.0.2
+mkl-service==2.0.2
+numpy==1.16.4
+numpydoc==0.9.1
+openpyxl==2.6.2
+pandas==0.24.2
+protobuf==3.13.0
+requests==2.22.0
+scikit-learn==0.21.2
+setuptools==41.0.1
+six==1.12.0
+tensorboard==1.14.0
+tensorflow==1.14.0
+tensorflow-estimator==1.14.0
+urllib3==1.24.2
+xlrd==1.2.0
+XlsxWriter==1.1.8
+xlwt==1.3.0
diff --git a/standalone/src/main/assembly/run.sh b/standalone/src/main/assembly/run.sh
new file mode 100755
index 0000000..1f42209
--- /dev/null
+++ b/standalone/src/main/assembly/run.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+#
+# Copyright 2016-2017 ZTE Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+cd /home/run/
+wget https://github.com/google-research/bert/archive/master.zip
+wget https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip
+
+unzip master.zip
+unzip uncased_L-12_H-768_A-12.zip
+rm master.zip uncased_L-12_H-768_A-12.zip
+mv scripts/* bert-master/  # place the NLP scripts alongside the BERT sources they import
+cd /home/run/bert-master/
+
+nohup python -u api_squad_online.py 33011 > online.log 2>&1 &
+nohup python -u api_squad_offline.py 33012 > offline.log 2>&1 & wait  # wait keeps the entrypoint, and thus the container, alive
diff --git a/test_1.py b/test_1.py
deleted file mode 100644
index 3e50176..0000000
--- a/test_1.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# -*- coding:utf-8 -*-
-import pytest
-
-
-@pytest.fixture(scope='function')
-def setup_function(request):
-    def teardown_function():
-        print("teardown_function called.")
-    request.addfinalizer(teardown_function) # this nested function performs the teardown work
-    print('setup_function called.')
-
-
-@pytest.fixture(scope='module')
-def setup_module(request):
-    def teardown_module():
-        print("teardown_module called.")
-    request.addfinalizer(teardown_module)
-    print('setup_module called.')
-
-
-# @pytest.mark.website
-def test_1(setup_function):
-    print('Test_1 called.')
-    assert 1 == 1
diff --git a/tox.ini b/tox.ini
deleted file mode 100644
index 2b6716e..0000000
--- a/tox.ini
+++ /dev/null
@@ -1,26 +0,0 @@
-# content of: tox.ini , put in same dir as setup.py
-[tox]
-envlist = py36,pep8,cov
-skipsdist = true
-
-[flake8]
-ignore = E501,E722
-exclude = ./venv-tox,./.tox,./venv,./docs
-
-[testenv:pep8]
-deps = flake8
-commands = flake8
-
-[testenv]
-deps = -r{toxinidir}/requirements.txt
-commands = pytest
-
-[testenv:py36]
-commands =
-    {[testenv]commands}
-
-[testenv:cov]
-deps = pytest
-    pytest-cov
-commands = pytest --cov-report=html
-
--
cgit 1.2.3-korg