16
16
from tensorflow .python .keras .regularizers import l2
17
17
18
18
from .activation import activation_fun
19
+ from .utils import concat_fun
19
20
20
21
21
22
class AFMLayer (Layer ):
@@ -113,7 +114,7 @@ def call(self, inputs, **kwargs):
113
114
self .normalized_att_score = tf .nn .softmax (tf .tensordot (
114
115
attention_temp , self .projection_h , axes = (- 1 , 0 )), dim = 1 )
115
116
attention_output = tf .reduce_sum (
116
- self .normalized_att_score * bi_interaction , axis = 1 )
117
+ self .normalized_att_score * bi_interaction , axis = 1 )
117
118
118
119
attention_output = tf .nn .dropout (
119
120
attention_output , self .keep_prob , seed = 1024 )
@@ -130,7 +131,7 @@ def compute_output_shape(self, input_shape):
130
131
'on a list of inputs.' )
131
132
return (None , 1 )
132
133
133
- def get_config (self ,):
134
+ def get_config (self , ):
134
135
config = {'attention_factor' : self .attention_factor ,
135
136
'l2_reg_w' : self .l2_reg_w , 'keep_prob' : self .keep_prob , 'seed' : self .seed }
136
137
base_config = super (AFMLayer , self ).get_config ()
@@ -175,7 +176,7 @@ def call(self, inputs, **kwargs):
175
176
concated_embeds_value , axis = 1 , keep_dims = True ))
176
177
sum_of_square = tf .reduce_sum (
177
178
concated_embeds_value * concated_embeds_value , axis = 1 , keep_dims = True )
178
- cross_term = 0.5 * (square_of_sum - sum_of_square )
179
+ cross_term = 0.5 * (square_of_sum - sum_of_square )
179
180
180
181
return cross_term
181
182
@@ -206,7 +207,7 @@ class CIN(Layer):
206
207
- [Lian J, Zhou X, Zhang F, et al. xDeepFM: Combining Explicit and Implicit Feature Interactions for Recommender Systems[J]. arXiv preprint arXiv:1803.05170, 2018.] (https://arxiv.org/pdf/1803.05170.pdf)
207
208
"""
208
209
209
- def __init__ (self , layer_size = (128 , 128 ), activation = 'relu' ,split_half = True , l2_reg = 1e-5 ,seed = 1024 , ** kwargs ):
210
+ def __init__ (self , layer_size = (128 , 128 ), activation = 'relu' , split_half = True , l2_reg = 1e-5 , seed = 1024 , ** kwargs ):
210
211
if len (layer_size ) == 0 :
211
212
raise ValueError (
212
213
"layer_size must be a list(tuple) of length greater than 1" )
@@ -230,7 +231,8 @@ def build(self, input_shape):
230
231
self .filters .append (self .add_weight (name = 'filter' + str (i ),
231
232
shape = [1 , self .field_nums [- 1 ]
232
233
* self .field_nums [0 ], size ],
233
- dtype = tf .float32 , initializer = glorot_uniform (seed = self .seed + i ), regularizer = l2 (self .l2_reg )))
234
+ dtype = tf .float32 , initializer = glorot_uniform (seed = self .seed + i ),
235
+ regularizer = l2 (self .l2_reg )))
234
236
235
237
self .bias .append (self .add_weight (name = 'bias' + str (i ), shape = [size ], dtype = tf .float32 ,
236
238
initializer = tf .keras .initializers .Zeros ()))
@@ -346,13 +348,13 @@ def build(self, input_shape):
346
348
"Unexpected inputs dimensions %d, expect to be 2 dimensions" % (len (input_shape ),))
347
349
348
350
dim = input_shape [- 1 ].value
349
- self .kernels = [self .add_weight (name = 'kernel' + str (i ),
351
+ self .kernels = [self .add_weight (name = 'kernel' + str (i ),
350
352
shape = (dim , 1 ),
351
353
initializer = glorot_normal (
352
354
seed = self .seed ),
353
355
regularizer = l2 (self .l2_reg ),
354
356
trainable = True ) for i in range (self .layer_num )]
355
- self .bias = [self .add_weight (name = 'bias' + str (i ),
357
+ self .bias = [self .add_weight (name = 'bias' + str (i ),
356
358
shape = (dim , 1 ),
357
359
initializer = Zeros (),
358
360
trainable = True ) for i in range (self .layer_num )]
@@ -373,7 +375,7 @@ def call(self, inputs, **kwargs):
373
375
x_l = tf .squeeze (x_l , axis = 2 )
374
376
return x_l
375
377
376
- def get_config (self ,):
378
+ def get_config (self , ):
377
379
378
380
config = {'layer_num' : self .layer_num ,
379
381
'l2_reg' : self .l2_reg , 'seed' : self .seed }
@@ -436,7 +438,7 @@ class InnerProductLayer(Layer):
436
438
product or inner product between feature vectors.
437
439
438
440
Input shape
439
- - A list of N 3D tensor with shape: ``(batch_size,1,embedding_size)``.
441
+ - a list of 3D tensor with shape: ``(batch_size,1,embedding_size)``.
440
442
441
443
Output shape
442
444
- 3D tensor with shape: ``(batch_size, N*(N-1)/2 ,1)`` if use reduce_sum. or 3D tensor with shape: ``(batch_size, N*(N-1)/2, embedding_size )`` if not use reduce_sum.
@@ -492,7 +494,9 @@ def call(self, inputs, **kwargs):
492
494
col .append (j )
493
495
p = tf .concat ([embed_list [idx ]
494
496
for idx in row ], axis = 1 ) # batch num_pairs k
495
- q = tf .concat ([embed_list [idx ] for idx in col ], axis = 1 )
497
+ q = tf .concat ([embed_list [idx ]
498
+ for idx in col ], axis = 1 )
499
+
496
500
inner_product = p * q
497
501
if self .reduce_sum :
498
502
inner_product = tf .reduce_sum (
@@ -509,7 +513,7 @@ def compute_output_shape(self, input_shape):
509
513
else :
510
514
return (input_shape [0 ], num_pairs , embed_size )
511
515
512
- def get_config (self ,):
516
+ def get_config (self , ):
513
517
config = {'reduce_sum' : self .reduce_sum , }
514
518
base_config = super (InnerProductLayer , self ).get_config ()
515
519
return dict (list (base_config .items ()) + list (config .items ()))
@@ -549,14 +553,18 @@ def build(self, input_shape):
549
553
raise ValueError (
550
554
"Unexpected inputs dimensions %d, expect to be 3 dimensions" % (len (input_shape )))
551
555
embedding_size = input_shape [- 1 ].value
552
- self .W_Query = self .add_weight (name = 'query' , shape = [embedding_size , self .att_embedding_size * self .head_num ], dtype = tf .float32 ,
556
+ self .W_Query = self .add_weight (name = 'query' , shape = [embedding_size , self .att_embedding_size * self .head_num ],
557
+ dtype = tf .float32 ,
553
558
initializer = tf .keras .initializers .TruncatedNormal (seed = self .seed ))
554
- self .W_key = self .add_weight (name = 'key' , shape = [embedding_size , self .att_embedding_size * self .head_num ], dtype = tf .float32 ,
555
- initializer = tf .keras .initializers .TruncatedNormal (seed = self .seed + 1 ))
556
- self .W_Value = self .add_weight (name = 'value' , shape = [embedding_size , self .att_embedding_size * self .head_num ], dtype = tf .float32 ,
557
- initializer = tf .keras .initializers .TruncatedNormal (seed = self .seed + 2 ))
559
+ self .W_key = self .add_weight (name = 'key' , shape = [embedding_size , self .att_embedding_size * self .head_num ],
560
+ dtype = tf .float32 ,
561
+ initializer = tf .keras .initializers .TruncatedNormal (seed = self .seed + 1 ))
562
+ self .W_Value = self .add_weight (name = 'value' , shape = [embedding_size , self .att_embedding_size * self .head_num ],
563
+ dtype = tf .float32 ,
564
+ initializer = tf .keras .initializers .TruncatedNormal (seed = self .seed + 2 ))
558
565
if self .use_res :
559
- self .W_Res = self .add_weight (name = 'res' , shape = [embedding_size , self .att_embedding_size * self .head_num ], dtype = tf .float32 ,
566
+ self .W_Res = self .add_weight (name = 'res' , shape = [embedding_size , self .att_embedding_size * self .head_num ],
567
+ dtype = tf .float32 ,
560
568
initializer = tf .keras .initializers .TruncatedNormal (seed = self .seed ))
561
569
562
570
# Be sure to call this somewhere!
@@ -656,10 +664,12 @@ def build(self, input_shape):
656
664
embed_size = input_shape [- 1 ].value
657
665
if self .kernel_type == 'mat' :
658
666
659
- self .kernel = self .add_weight (shape = (embed_size , num_pairs , embed_size ), initializer = glorot_uniform (seed = self .seed ),
667
+ self .kernel = self .add_weight (shape = (embed_size , num_pairs , embed_size ),
668
+ initializer = glorot_uniform (seed = self .seed ),
660
669
name = 'kernel' )
661
670
elif self .kernel_type == 'vec' :
662
- self .kernel = self .add_weight (shape = (num_pairs , embed_size ,), initializer = glorot_uniform (self .seed ), name = 'kernel'
671
+ self .kernel = self .add_weight (shape = (num_pairs , embed_size ,), initializer = glorot_uniform (self .seed ),
672
+ name = 'kernel'
663
673
)
664
674
elif self .kernel_type == 'num' :
665
675
self .kernel = self .add_weight (
@@ -737,7 +747,88 @@ def compute_output_shape(self, input_shape):
737
747
num_pairs = int (num_inputs * (num_inputs - 1 ) / 2 )
738
748
return (None , num_pairs )
739
749
740
- def get_config (self ,):
750
+ def get_config (self , ):
741
751
config = {'kernel_type' : self .kernel_type , 'seed' : self .seed }
742
752
base_config = super (OutterProductLayer , self ).get_config ()
743
753
return dict (list (base_config .items ()) + list (config .items ()))
754
+
755
+
756
class FGCNNLayer(Layer):
    """Feature Generation Layer used in FGCNN, including Convolution, MaxPooling and Recombination.

      Input shape
        - A 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.

      Output shape
        - 3D tensor with shape: ``(batch_size,new_feature_num,embedding_size)``.

      Arguments
        - **filters**: tuple of int. Number of convolution filters for each conv layer.
        - **kernel_width**: tuple of int. Height of the 2D conv kernel for each conv layer
          (the kernel always spans a single embedding column, i.e. kernel_size=(width, 1)).
        - **new_maps**: tuple of int. Number of new feature maps generated by the
          recombination (Dense) step after each pooling layer.
        - **pooling_width**: tuple of int. Pool height for each MaxPooling2D layer.

      References
        - [Liu B, Tang R, Chen Y, et al. Feature Generation by Convolutional Neural Network for Click-Through Rate Prediction[J]. arXiv preprint arXiv:1904.04447, 2019.](https://arxiv.org/pdf/1904.04447)

    """

    def __init__(self, filters=(14, 16,), kernel_width=(7, 7,), new_maps=(3, 3,), pooling_width=(2, 2),
                 **kwargs):
        # All four per-stage hyperparameter tuples must describe the same number
        # of conv/pool/recombination stages.
        if not (len(filters) == len(kernel_width) == len(new_maps) == len(pooling_width)):
            raise ValueError("length of argument must be equal")
        self.filters = filters
        self.kernel_width = kernel_width
        self.new_maps = new_maps
        self.pooling_width = pooling_width

        super(FGCNNLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Validate rank only; see NOTE in call() about where sublayers are created.
        if len(input_shape) != 3:
            raise ValueError(
                "Unexpected inputs dimensions %d, expect to be 3 dimensions" % (len(input_shape)))

        super(FGCNNLayer, self).build(
            input_shape)  # Be sure to call this somewhere!

    def call(self, inputs, **kwargs):
        """Run each conv -> pool -> flatten -> dense-recombine stage and concat the
        generated feature maps along the field axis.

        NOTE(review): the Conv2D/MaxPooling2D/Dense sublayers are instantiated
        inside call(), so a fresh set of (untracked) weights is created on every
        invocation — presumably acceptable for one-shot graph construction in
        TF1, but this layer cannot be called twice with shared weights; confirm
        against the training setup before reuse.
        """
        if K.ndim(inputs) != 3:
            raise ValueError(
                "Unexpected inputs dimensions %d, expect to be 3 dimensions" % (K.ndim(inputs)))

        embedding_size = inputs.shape[-1].value
        # Add a trailing channel axis: (batch, field, embed) -> (batch, field, embed, 1).
        pooling_result = tf.keras.layers.Lambda(
            lambda x: tf.expand_dims(x, axis=3))(inputs)

        new_feature_list = []

        for i in range(1, len(self.filters) + 1):
            filters = self.filters[i - 1]
            width = self.kernel_width[i - 1]
            new_filters = self.new_maps[i - 1]
            pooling_width = self.pooling_width[i - 1]
            # Convolution spans `width` fields but only one embedding column at a time.
            conv_result = tf.keras.layers.Conv2D(filters=filters, kernel_size=(width, 1), strides=(1, 1),
                                                 padding='same',
                                                 activation='tanh', use_bias=True, )(pooling_result)
            pooling_result = tf.keras.layers.MaxPooling2D(
                pool_size=(pooling_width, 1))(conv_result)
            # Recombination: a dense layer mixes all pooled maps into
            # `new_filters` new feature maps per remaining field position.
            flatten_result = tf.keras.layers.Flatten()(pooling_result)
            new_result = tf.keras.layers.Dense(pooling_result.shape[1].value * embedding_size * new_filters,
                                               activation='tanh', use_bias=True)(flatten_result)
            new_feature_list.append(
                tf.keras.layers.Reshape((pooling_result.shape[1].value * new_filters, embedding_size))(new_result))
        # Stack the per-stage generated features along the field axis.
        new_features = concat_fun(new_feature_list, axis=1)
        return new_features

    def compute_output_shape(self, input_shape):
        # Mirror call(): each stage halves (by pooling_width) the field count and
        # contributes new_maps[i] maps per surviving field position.
        new_features_num = 0
        features_num = input_shape[1]

        for i in range(0, len(self.kernel_width)):
            pooled_features_num = features_num // self.pooling_width[i]
            new_features_num += self.new_maps[i] * pooled_features_num
            features_num = pooled_features_num

        return (None, new_features_num, input_shape[-1])

    def get_config(self, ):
        # Serialize all constructor hyperparameters so the layer round-trips
        # through tf.keras model (de)serialization.
        config = {'kernel_width': self.kernel_width, 'filters': self.filters, 'new_maps': self.new_maps,
                  'pooling_width': self.pooling_width}
        base_config = super(FGCNNLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
0 commit comments