8
8
9
9
"""
10
10
11
- from tensorflow .python .keras .layers import Dense , Embedding , Concatenate , Reshape , Flatten , add
11
+ from tensorflow .python .keras .layers import Dense , Embedding , Concatenate , Reshape , Flatten , add
12
12
from tensorflow .python .keras .models import Model
13
13
from tensorflow .python .keras .initializers import RandomNormal
14
14
from tensorflow .python .keras .regularizers import l2
15
15
16
16
17
17
from ..utils import get_input
18
- from ..layers import PredictionLayer ,MLP ,FM
18
+ from ..layers import PredictionLayer , MLP , FM
19
19
20
20
21
21
def DeepFM (feature_dim_dict , embedding_size = 8 ,
22
- use_fm = True , hidden_size = [256 , 256 ], l2_reg_linear = 0.00001 , l2_reg_embedding = 0.00001 , l2_reg_deep = 0 ,
23
- init_std = 0.0001 , seed = 1024 , keep_prob = 0.5 , activation = 'relu' , final_activation = 'sigmoid' , use_bn = False ):
22
+ use_fm = True , hidden_size = [128 , 128 ], l2_reg_linear = 0.00001 , l2_reg_embedding = 0.00001 , l2_reg_deep = 0 ,
23
+ init_std = 0.0001 , seed = 1024 , keep_prob = 1 , activation = 'relu' , final_activation = 'sigmoid' , use_bn = False ):
24
24
"""Instantiates the DeepFM Network architecture.
25
25
26
26
:param feature_dim_dict: dict,to indicate sparse field and dense field like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}
@@ -42,22 +42,24 @@ def DeepFM(feature_dim_dict, embedding_size=8,
42
42
dict ) or "sparse" not in feature_dim_dict or "dense" not in feature_dim_dict :
43
43
raise ValueError (
44
44
"feature_dim_dict must be a dict like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_5',]}" )
45
- if not isinstance (feature_dim_dict ["sparse" ],dict ):
46
- raise ValueError ("feature_dim_dict['sparse'] must be a dict,cur is" ,type (feature_dim_dict ['sparse' ]))
47
- if not isinstance (feature_dim_dict ["dense" ],list ):
48
- raise ValueError ("feature_dim_dict['dense'] must be a list,cur is" , type (feature_dim_dict ['dense' ]))
49
-
50
-
51
-
52
-
53
- sparse_input , dense_input = get_input (feature_dim_dict ,None )
54
- sparse_embedding , linear_embedding , = get_embeddings (feature_dim_dict ,embedding_size ,init_std ,seed ,l2_reg_deep ,l2_reg_linear )
55
-
56
- embed_list = [sparse_embedding [i ](sparse_input [i ]) for i in range (len (sparse_input ))]
57
- linear_term = [linear_embedding [i ](sparse_input [i ]) for i in range (len (sparse_input ))]
45
+ if not isinstance (feature_dim_dict ["sparse" ], dict ):
46
+ raise ValueError ("feature_dim_dict['sparse'] must be a dict,cur is" , type (
47
+ feature_dim_dict ['sparse' ]))
48
+ if not isinstance (feature_dim_dict ["dense" ], list ):
49
+ raise ValueError ("feature_dim_dict['dense'] must be a list,cur is" , type (
50
+ feature_dim_dict ['dense' ]))
51
+
52
+ sparse_input , dense_input = get_input (feature_dim_dict , None )
53
+ sparse_embedding , linear_embedding , = get_embeddings (
54
+ feature_dim_dict , embedding_size , init_std , seed , l2_reg_embedding , l2_reg_linear )
55
+
56
+ embed_list = [sparse_embedding [i ](sparse_input [i ])
57
+ for i in range (len (sparse_input ))]
58
+ linear_term = [linear_embedding [i ](sparse_input [i ])
59
+ for i in range (len (sparse_input ))]
58
60
if len (linear_term ) > 1 :
59
61
linear_term = add (linear_term )
60
- elif len (linear_term ) > 0 :
62
+ elif len (linear_term ) > 0 :
61
63
linear_term = linear_term [0 ]
62
64
else :
63
65
linear_term = 0
@@ -66,19 +68,22 @@ def DeepFM(feature_dim_dict, embedding_size=8,
66
68
continuous_embedding_list = list (
67
69
map (Dense (embedding_size , use_bias = False , kernel_regularizer = l2 (l2_reg_embedding ), ),
68
70
dense_input ))
69
- continuous_embedding_list = list (map (Reshape ((1 , embedding_size )), continuous_embedding_list ))
71
+ continuous_embedding_list = list (
72
+ map (Reshape ((1 , embedding_size )), continuous_embedding_list ))
70
73
embed_list += continuous_embedding_list
71
74
72
- dense_input_ = dense_input [0 ] if len (dense_input ) == 1 else Concatenate ()(dense_input )
73
- linear_dense_logit = Dense (1 ,activation = None ,use_bias = False ,kernel_regularizer = l2 (l2_reg_linear ))(dense_input_ )
74
- linear_term = add ([linear_dense_logit ,linear_term ])
75
+ dense_input_ = dense_input [0 ] if len (
76
+ dense_input ) == 1 else Concatenate ()(dense_input )
77
+ linear_dense_logit = Dense (
78
+ 1 , activation = None , use_bias = False , kernel_regularizer = l2 (l2_reg_linear ))(dense_input_ )
79
+ linear_term = add ([linear_dense_logit , linear_term ])
75
80
76
81
fm_input = Concatenate (axis = 1 )(embed_list )
77
82
deep_input = Flatten ()(fm_input )
78
83
fm_out = FM ()(fm_input )
79
84
deep_out = MLP (hidden_size , activation , l2_reg_deep , keep_prob ,
80
85
use_bn , seed )(deep_input )
81
- deep_logit = Dense (1 ,use_bias = False , activation = None )(deep_out )
86
+ deep_logit = Dense (1 , use_bias = False , activation = None )(deep_out )
82
87
83
88
if len (hidden_size ) == 0 and use_fm == False : # only linear
84
89
final_logit = linear_term
@@ -92,22 +97,21 @@ def DeepFM(feature_dim_dict, embedding_size=8,
92
97
raise NotImplementedError
93
98
94
99
output = PredictionLayer (final_activation )(final_logit )
95
- model = Model (inputs = sparse_input + dense_input , outputs = output )
100
+ model = Model (inputs = sparse_input + dense_input , outputs = output )
96
101
return model
97
102
98
103
99
104
def get_embeddings(feature_dim_dict, embedding_size, init_std, seed, l2_rev_V, l2_reg_w):
    """Build the per-field embedding layers for the sparse features.

    :param feature_dim_dict: dict whose ``"sparse"`` entry maps each sparse
        field name to its vocabulary size.
    :param embedding_size: dimension of the dense (order-2 / deep) embedding
        vectors.
    :param init_std: stddev for the RandomNormal embedding initializer.
    :param seed: seed forwarded to the initializer for reproducibility.
    :param l2_rev_V: L2 regularization strength for the dense embeddings.
    :param l2_reg_w: L2 regularization strength for the linear (order-1)
        single-weight embeddings.
    :return: tuple ``(sparse_embedding, linear_embedding)`` — two parallel
        lists of ``Embedding`` layers, one entry per sparse field; the linear
        list embeds each category into a single scalar weight.
    """
    # Dense embeddings: one embedding_size-dimensional vector per category,
    # consumed by the FM (order-2) and deep parts of the model.
    sparse_embedding = []
    for idx, feat in enumerate(feature_dim_dict["sparse"]):
        layer = Embedding(feature_dim_dict["sparse"][feat], embedding_size,
                          embeddings_initializer=RandomNormal(
                              mean=0.0, stddev=init_std, seed=seed),
                          embeddings_regularizer=l2(l2_rev_V),
                          name='sparse_emb_' + str(idx) + '-' + feat)
        sparse_embedding.append(layer)

    # Linear embeddings: output dimension 1, i.e. a single learned weight per
    # category, used by the linear (order-1) term.
    linear_embedding = []
    for idx, feat in enumerate(feature_dim_dict["sparse"]):
        layer = Embedding(feature_dim_dict["sparse"][feat], 1,
                          embeddings_initializer=RandomNormal(
                              mean=0.0, stddev=init_std, seed=seed),
                          embeddings_regularizer=l2(l2_reg_w),
                          name='linear_emb_' + str(idx) + '-' + feat)
        linear_embedding.append(layer)

    return sparse_embedding, linear_embedding
113
-
0 commit comments