@@ -55,7 +55,7 @@ def __init__(self, component_config=None):
 
         self.partial_processing_pipeline = None
         self.partial_processing_context = None
-        self.layer_indexes = [-1]
+        self.layer_indexes = [-2]
         bert_config = modeling.BertConfig.from_json_file("/Users/oakela/Documents/RASA/bert/uncased_L-24_H-1024_A-16/bert_config.json")
         self.tokenizer = tokenization.FullTokenizer(vocab_file="/Users/oakela/Documents/RASA/bert/uncased_L-24_H-1024_A-16/vocab.txt", do_lower_case=True)
         is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
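A note on the `[-1]` to `[-2]` change: under the convention used by the BERT reference `extract_features` code, negative layer indexes count back from the top encoder layer, so `-1` is the final layer and `-2` the penultimate one. The penultimate layer is a common choice for feature extraction, since the top layer is the most specialized toward the pretraining objectives. A minimal sketch of the index arithmetic, assuming the 24-layer model named in the paths above (illustration only, not part of the component):

```python
# How a negative layer index maps onto BERT-large's 24 encoder layers
# under the extract_features convention (layers numbered 0..23).
num_hidden_layers = 24          # "L-24" in uncased_L-24_H-1024_A-16
layer_indexes = [-2]
selected = [num_hidden_layers + i for i in layer_indexes]
print(selected)                 # [22], i.e. the second-to-last layer
```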
@@ -66,7 +66,7 @@ def __init__(self, component_config=None):
             per_host_input_for_training=is_per_host))
         model_fn = model_fn_builder(
             bert_config=bert_config,
-            init_checkpoint="/Users/oakela/Documents/RASA/bert/uncased_L-24_H-1024_A-16/bert_model.ckpt.index",
+            init_checkpoint="/Users/oakela/Documents/RASA/bert/uncased_L-24_H-1024_A-16/bert_model.ckpt",
             layer_indexes=self.layer_indexes,
             use_tpu=False,
             use_one_hot_embeddings=False)
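A note on the `init_checkpoint` fix: a TF1 checkpoint is stored as a family of files sharing one prefix (`bert_model.ckpt.index`, `bert_model.ckpt.meta`, `bert_model.ckpt.data-*`), and TensorFlow's restore utilities expect the common prefix `bert_model.ckpt`, not the `.index` shard. A quick way to sanity-check that the prefix resolves (a sketch, not part of the component):

```python
import tensorflow as tf

# Pass the prefix, not an individual shard file; path as used in the diff.
ckpt_prefix = "/Users/oakela/Documents/RASA/bert/uncased_L-24_H-1024_A-16/bert_model.ckpt"

# List a few variables to confirm the checkpoint loads from the prefix.
for name, shape in tf.train.list_variables(ckpt_prefix)[:3]:
    print(name, shape)
```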
@@ -83,6 +83,7 @@ def train(self, training_data, config, **kwargs):
         fs = create_features(messages, self.estimator, self.tokenizer, self.layer_indexes)
         features = []
         for x in fs:
+            # features.append(np.array(x['features'][0]['layers'][0]['values']))
             feats = [y['layers'][0]['values'] for y in x['features'][1:-1]]
             features.append(np.average(feats, axis=0))
         for i, message in enumerate(training_data.intent_examples):
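What the loop body computes: in `extract_features`-style output, `x['features']` holds one dict per WordPiece token, starting with `[CLS]` and ending with `[SEP]`; the `[1:-1]` slice drops those two specials, and `np.average(..., axis=0)` mean-pools the remaining token vectors into one fixed-size sentence vector. The commented-out line records the alternative of using the `[CLS]` vector alone. A self-contained toy (values are made up; only the shape of the structure matters):

```python
import numpy as np

# Toy stand-in for one element of fs, mimicking the extract_features
# JSON layout: one dict per token, each with per-layer 'values'.
x = {"features": [
    {"token": "[CLS]", "layers": [{"index": -2, "values": [9.0, 9.0]}]},
    {"token": "hello", "layers": [{"index": -2, "values": [1.0, 3.0]}]},
    {"token": "world", "layers": [{"index": -2, "values": [3.0, 1.0]}]},
    {"token": "[SEP]", "layers": [{"index": -2, "values": [9.0, 9.0]}]},
]}

feats = [y["layers"][0]["values"] for y in x["features"][1:-1]]
print(np.average(feats, axis=0))   # [2. 2.] -- mean over real tokens only
```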
@@ -100,4 +101,5 @@ def _set_bert_features(self, message):
         fs = create_features([message.text], self.estimator, self.tokenizer, self.layer_indexes)
         feats = [x['layers'][0]['values'] for x in fs[0]['features'][1:-1]]
         features = np.average(feats, axis=0)
+        # features = np.array(fs[0]['features'][0]['layers'][0]['values'])
         message.set("text_features", features)
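The inference path mirrors the training loop for a single message, and the commented-out line again keeps the `[CLS]`-only alternative on hand. For contrast, both pooling choices on the same toy structure (illustrative values only):

```python
import numpy as np

# Same toy extract_features-style structure as above, for one message.
fs = [{"features": [
    {"token": "[CLS]", "layers": [{"index": -2, "values": [0.5, -0.5]}]},
    {"token": "hi",    "layers": [{"index": -2, "values": [1.0,  2.0]}]},
    {"token": "[SEP]", "layers": [{"index": -2, "values": [0.0,  0.0]}]},
]}]

# Current behaviour: mean over the real tokens.
feats = [x["layers"][0]["values"] for x in fs[0]["features"][1:-1]]
mean_vec = np.average(feats, axis=0)                             # [1. 2.]

# Commented-out alternative: the [CLS] vector alone.
cls_vec = np.array(fs[0]["features"][0]["layers"][0]["values"])  # [0.5 -0.5]
print(mean_vec, cls_vec)
```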