Skip to content

Commit b88ed3e

Browse files
committed
refactored variable names
1 parent 93fe116 commit b88ed3e

File tree

2 files changed

+9
-11
lines changed

2 files changed

+9
-11
lines changed

tests/nlu/featurizers/test_lm_featurizer.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -5,11 +5,11 @@
55
from rasa.nlu.featurizers.dense_featurizer.lm_featurizer import LanguageModelFeaturizer
66
from rasa.nlu.utils.hugging_face.hf_transformers import HFTransformersNLP
77
from rasa.nlu.constants import (
8-
TEXT_ATTRIBUTE,
8+
TEXT,
99
DENSE_FEATURE_NAMES,
1010
TOKENS_NAMES,
11-
RESPONSE_ATTRIBUTE,
12-
INTENT_ATTRIBUTE,
11+
RESPONSE,
12+
INTENT,
1313
LANGUAGE_MODEL_DOCS,
1414
)
1515
from rasa.nlu.training_data import Message
@@ -206,7 +206,7 @@ def test_lm_featurizer_shape_values(
206206

207207
for index in range(len(texts)):
208208

209-
computed_feature_vec = messages[index].get(DENSE_FEATURE_NAMES[TEXT_ATTRIBUTE])
209+
computed_feature_vec = messages[index].get(DENSE_FEATURE_NAMES[TEXT])
210210
computed_sequence_vec, computed_sentence_vec = (
211211
computed_feature_vec[:-1],
212212
computed_feature_vec[-1],
@@ -226,6 +226,6 @@ def test_lm_featurizer_shape_values(
226226
computed_sentence_vec[:5], expected_cls_vec[index], atol=1e-5
227227
)
228228

229-
intent_vec = messages[index].get(DENSE_FEATURE_NAMES[INTENT_ATTRIBUTE])
229+
intent_vec = messages[index].get(DENSE_FEATURE_NAMES[INTENT])
230230

231231
assert intent_vec is None

tests/nlu/tokenizers/test_lm_tokenizer.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import pytest
22

33
from rasa.nlu.training_data import Message, TrainingData
4-
from rasa.nlu.constants import TEXT_ATTRIBUTE, INTENT_ATTRIBUTE, TOKENS_NAMES
4+
from rasa.nlu.constants import TEXT, INTENT, TOKENS_NAMES
55
from rasa.nlu.tokenizers.lm_tokenizer import LanguageModelTokenizer
66
from rasa.nlu.utils.hugging_face.hf_transformers import HFTransformersNLP
77

@@ -306,7 +306,7 @@ def test_lm_tokenizer_edge_cases(model_name, texts, expected_tokens, expected_in
306306

307307
message = Message.build(text=text)
308308
transformers_nlp.process(message)
309-
tokens = lm_tokenizer.tokenize(message, TEXT_ATTRIBUTE)
309+
tokens = lm_tokenizer.tokenize(message, TEXT)
310310

311311
assert [t.text for t in tokens] == gt_tokens
312312
assert [t.start for t in tokens] == [i[0] for i in gt_indices]
@@ -330,13 +330,11 @@ def test_lm_tokenizer_custom_intent_symbol(text, expected_tokens):
330330
lm_tokenizer = LanguageModelTokenizer(component_config)
331331

332332
message = Message(text)
333-
message.set(INTENT_ATTRIBUTE, text)
333+
message.set(INTENT, text)
334334

335335
td = TrainingData([message])
336336

337337
transformers_nlp.train(td)
338338
lm_tokenizer.train(td)
339339

340-
assert [
341-
t.text for t in message.get(TOKENS_NAMES[INTENT_ATTRIBUTE])
342-
] == expected_tokens
340+
assert [t.text for t in message.get(TOKENS_NAMES[INTENT])] == expected_tokens

0 commit comments

Comments (0)