@@ -72,7 +72,6 @@ class LogisticRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredicti
     .. versionadded:: 1.3.0
     """
 
-    # a placeholder to make it appear in the generated doc
     threshold = Param(Params._dummy(), "threshold",
                       "Threshold in binary classification prediction, in range [0, 1]." +
                       " If threshold and thresholds are both set, they must match.")
@@ -92,10 +91,6 @@ def __init__(self, featuresCol="features", labelCol="label", predictionCol="pred
         super(LogisticRegression, self).__init__()
         self._java_obj = self._new_java_obj(
             "org.apache.spark.ml.classification.LogisticRegression", self.uid)
-        #: param for threshold in binary classification, in range [0, 1].
-        self.threshold = Param(self, "threshold",
-                               "Threshold in binary classification prediction, in range [0, 1]." +
-                               " If threshold and thresholds are both set, they must match.")
         self._setDefault(maxIter=100, regParam=0.1, tol=1E-6, threshold=0.5)
         kwargs = self.__init__._input_kwargs
         self.setParams(**kwargs)
@@ -232,18 +227,13 @@ class TreeClassifierParams(object):
     """
     supportedImpurities = ["entropy", "gini"]
 
-    # a placeholder to make it appear in the generated doc
     impurity = Param(Params._dummy(), "impurity",
                      "Criterion used for information gain calculation (case-insensitive). " +
                      "Supported options: " +
                      ", ".join(supportedImpurities))
 
     def __init__(self):
         super(TreeClassifierParams, self).__init__()
-        #: param for Criterion used for information gain calculation (case-insensitive).
-        self.impurity = Param(self, "impurity", "Criterion used for information " +
-                              "gain calculation (case-insensitive). Supported options: " +
-                              ", ".join(self.supportedImpurities))
 
     @since("1.6.0")
     def setImpurity(self, value):
@@ -485,7 +475,6 @@ class GBTClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol
     .. versionadded:: 1.4.0
     """
 
-    # a placeholder to make it appear in the generated doc
     lossType = Param(Params._dummy(), "lossType",
                      "Loss function which GBT tries to minimize (case-insensitive). " +
                      "Supported options: " + ", ".join(GBTParams.supportedLossTypes))
@@ -504,10 +493,6 @@ def __init__(self, featuresCol="features", labelCol="label", predictionCol="pred
         super(GBTClassifier, self).__init__()
         self._java_obj = self._new_java_obj(
             "org.apache.spark.ml.classification.GBTClassifier", self.uid)
-        #: param for Loss function which GBT tries to minimize (case-insensitive).
-        self.lossType = Param(self, "lossType",
-                              "Loss function which GBT tries to minimize (case-insensitive). " +
-                              "Supported options: " + ", ".join(GBTParams.supportedLossTypes))
         self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                          maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                          lossType="logistic", maxIter=20, stepSize=0.1)
@@ -597,7 +582,6 @@ class NaiveBayes(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, H
     .. versionadded:: 1.5.0
     """
 
-    # a placeholder to make it appear in the generated doc
     smoothing = Param(Params._dummy(), "smoothing", "The smoothing parameter, should be >= 0, " +
                       "default is 1.0")
     modelType = Param(Params._dummy(), "modelType", "The model type which is a string " +
@@ -615,13 +599,6 @@ def __init__(self, featuresCol="features", labelCol="label", predictionCol="pred
         super(NaiveBayes, self).__init__()
         self._java_obj = self._new_java_obj(
             "org.apache.spark.ml.classification.NaiveBayes", self.uid)
-        #: param for the smoothing parameter.
-        self.smoothing = Param(self, "smoothing", "The smoothing parameter, should be >= 0, " +
-                               "default is 1.0")
-        #: param for the model type.
-        self.modelType = Param(self, "modelType", "The model type which is a string " +
-                               "(case-sensitive). Supported options: multinomial (default) " +
-                               "and bernoulli.")
         self._setDefault(smoothing=1.0, modelType="multinomial")
         kwargs = self.__init__._input_kwargs
         self.setParams(**kwargs)
@@ -734,7 +711,6 @@ class MultilayerPerceptronClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol,
     .. versionadded:: 1.6.0
     """
 
-    # a placeholder to make it appear in the generated doc
     layers = Param(Params._dummy(), "layers", "Sizes of layers from input layer to output layer " +
                    "E.g., Array(780, 100, 10) means 780 inputs, one hidden layer with 100 " +
                    "neurons and output layer of 10 neurons, default is [1, 1].")
@@ -753,14 +729,6 @@ def __init__(self, featuresCol="features", labelCol="label", predictionCol="pred
         super(MultilayerPerceptronClassifier, self).__init__()
         self._java_obj = self._new_java_obj(
             "org.apache.spark.ml.classification.MultilayerPerceptronClassifier", self.uid)
-        self.layers = Param(self, "layers", "Sizes of layers from input layer to output layer " +
-                            "E.g., Array(780, 100, 10) means 780 inputs, one hidden layer with " +
-                            "100 neurons and output layer of 10 neurons, default is [1, 1].")
-        self.blockSize = Param(self, "blockSize", "Block size for stacking input data in " +
-                               "matrices. Data is stacked within partitions. If block size is " +
-                               "more than remaining data in a partition then it is adjusted to " +
-                               "the size of this data. Recommended size is between 10 and 1000, " +
-                               "default is 128.")
         self._setDefault(maxIter=100, tol=1E-4, layers=[1, 1], blockSize=128)
         kwargs = self.__init__._input_kwargs
         self.setParams(**kwargs)
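
Note on the pattern this diff relies on: each param is declared once at class level with `Params._dummy()` as its parent (so it appears in the generated docs), and PySpark's `Params.__init__` takes care of giving every instance its own copy with the instance as parent, which is why the per-instance re-definitions removed above are redundant. The following is a minimal, simplified sketch of that copy step, not the actual pyspark source; the class names mirror the real ones but the bodies are illustrative assumptions.

    import copy


    class Param(object):
        def __init__(self, parent, name, doc):
            self.parent = parent
            self.name = name
            self.doc = doc


    class Params(object):
        @staticmethod
        def _dummy():
            # Placeholder parent used for class-level declarations.
            return "undefined"

        def __init__(self):
            # Copy every class-level Param onto this instance, re-parented to self.
            cls = type(self)
            for name in dir(cls):
                attr = getattr(cls, name)
                if isinstance(attr, Param):
                    param = copy.copy(attr)
                    param.parent = self
                    setattr(self, name, param)


    class LogisticRegression(Params):
        # Declared once; documented here, re-parented per instance in Params.__init__.
        threshold = Param(Params._dummy(), "threshold",
                          "Threshold in binary classification prediction, in range [0, 1].")


    lr = LogisticRegression()
    assert lr.threshold is not LogisticRegression.threshold
    assert lr.threshold.parent is lr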