support deepfm and kaggle display advertising dataset
kaierlong authored and hellowaywewe committed Jan 19, 2022
1 parent 669b988 commit 79c664b
Showing 11 changed files with 887 additions and 6 deletions.
4 changes: 3 additions & 1 deletion requirements.txt
@@ -13,4 +13,6 @@ gensim==3.8.1
 PyYAML
 opencv-python==4.1.2.30
 flask_cors>=3.0.10
-pycocotools>=2.0.0 # for st test
+pycocotools>=2.0.0 # for st test
+wget==3.2
+scikit-learn==1.0.1
3 changes: 3 additions & 0 deletions setup.py
@@ -55,6 +55,9 @@ def _write_version(file):
     'PyYAML',
     'opencv-python==4.1.2.30',
     'flask_cors>=3.0.10',
+    'wget==3.2',
+    'scikit-learn==1.0.1',
+    'tqdm==4.62.3',
 ]
 
 test_required_package = [
56 changes: 55 additions & 1 deletion tinyms/callbacks.py
@@ -20,7 +20,7 @@
 from mindspore.train.callback import *
 from . import Tensor
 
-__all__ = ['LossTimeMonitor', 'BertLossCallBack']
+__all__ = ['LossTimeMonitor', 'LossTimeMonitorV2', 'BertLossCallBack']
 __all__.extend(callback.__all__)
 
 
@@ -81,6 +81,60 @@ def step_end(self, run_context):
               np.mean(self.losses), step_mseconds, self.lr_init[cb_params.cur_step_num - 1]))
 
 
+class LossTimeMonitorV2(Callback):
+    """
+    Monitor loss and time, version 2.0.
+
+    Unlike LossTimeMonitor, this version does not display the learning rate.
+
+    Returns:
+        None
+
+    Examples:
+        >>> from tinyms.callbacks import LossTimeMonitorV2
+        >>>
+        >>> LossTimeMonitorV2()
+    """
+
+    def __init__(self):
+        super(LossTimeMonitorV2, self).__init__()
+
+    def epoch_begin(self, run_context):
+        self.losses = []
+        self.epoch_time = time.time()
+
+    def epoch_end(self, run_context):
+        cb_params = run_context.original_args()
+
+        epoch_mseconds = (time.time() - self.epoch_time) * 1000
+        per_step_mseconds = epoch_mseconds / cb_params.batch_num
+        print("epoch time: {:5.3f}, per step time: {:5.3f}, avg loss: {:5.3f}".
+              format(epoch_mseconds, per_step_mseconds, np.mean(self.losses)), flush=True)
+
+    def step_begin(self, run_context):
+        self.step_time = time.time()
+
+    def step_end(self, run_context):
+        cb_params = run_context.original_args()
+        step_mseconds = (time.time() - self.step_time) * 1000
+        step_loss = cb_params.net_outputs
+        # arr_lr = cb_params.optimizer.learning_rate.asnumpy()
+        # lr = float(np.array2string(arr_lr))
+
+        if isinstance(step_loss, (tuple, list)) and isinstance(step_loss[0], Tensor):
+            step_loss = step_loss[0]
+        if isinstance(step_loss, Tensor):
+            step_loss = np.mean(step_loss.asnumpy())
+
+        self.losses.append(step_loss)
+        cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num
+
print("epoch: [{:3d}/{:3d}], step:[{:5d}/{:5d}], loss:[{:5.3f}/{:5.3f}], time:[{:5.3f}]]".format(
cb_params.cur_epoch_num - 1, cb_params.epoch_num, cur_step_in_epoch, cb_params.batch_num, step_loss,
np.mean(self.losses), step_mseconds), flush=True)


 class BertLossCallBack(Callback):
     """
     Monitor the loss in training.
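For context, the new callback drops into a TinyMS training loop the same way LossTimeMonitor does. A minimal usage sketch follows; it is not part of this commit, and `net`, `net_loss`, `net_opt`, `train_dataset`, and `epoch_size` are placeholder names for a network, loss, optimizer, dataset, and epoch count defined elsewhere:

from tinyms.model import Model
from tinyms.callbacks import LossTimeMonitorV2

# `net`, `net_loss`, `net_opt`, `train_dataset`, and `epoch_size` are
# placeholders for objects built earlier in a TinyMS training script.
model = Model(net)
model.compile(loss_fn=net_loss, optimizer=net_opt)
model.train(epoch_size, train_dataset,
            callbacks=[LossTimeMonitorV2()], dataset_sink_mode=False)

With this wiring, every step prints a line of the form epoch: [.../...], step:[.../...], loss:[.../...], time:[...], and every epoch ends with an epoch time / per step time / avg loss summary, with no learning-rate column.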
