@@ -32,108 +32,83 @@ import com.typesafe.tools.mima.core._
32
32
*/
33
33
object MimaExcludes {

  /**
   * Returns the MiMa binary-compatibility exclusions to apply when checking the
   * given Spark version against the previous release.
   *
   * Matching is by version prefix: "1.1" picks up the 1.1.x exclusion set,
   * "1.0" the 1.0.x set, and anything else gets no exclusions.
   *
   * @param version the Spark version string being checked (e.g. "1.1.0")
   * @return the sequence of MiMa problem filters to exclude for that version
   */
  def excludes(version: String) =
    version match {
      case v if v.startsWith("1.1") =>
        Seq(
          MimaBuild.excludeSparkPackage("deploy"),
          MimaBuild.excludeSparkPackage("graphx")
        ) ++
        Seq(
          // Adding new method to JavaRDDLike trait - we should probably mark this as a developer API.
          ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.api.java.JavaRDDLike.partitions"),
          // We made a mistake earlier (ed06500d3) in the Java API to use default parameter values
          // for countApproxDistinct* functions, which does not work in Java. We later removed
          // them, and use the following to tell Mima to not care about them.
          ProblemFilters.exclude[IncompatibleResultTypeProblem](
            "org.apache.spark.api.java.JavaPairRDD.countApproxDistinctByKey"),
          ProblemFilters.exclude[IncompatibleResultTypeProblem](
            "org.apache.spark.api.java.JavaPairRDD.countApproxDistinctByKey"),
          ProblemFilters.exclude[MissingMethodProblem](
            "org.apache.spark.api.java.JavaPairRDD.countApproxDistinct$default$1"),
          ProblemFilters.exclude[MissingMethodProblem](
            "org.apache.spark.api.java.JavaPairRDD.countApproxDistinctByKey$default$1"),
          ProblemFilters.exclude[MissingMethodProblem](
            "org.apache.spark.api.java.JavaRDD.countApproxDistinct$default$1"),
          ProblemFilters.exclude[MissingMethodProblem](
            "org.apache.spark.api.java.JavaRDDLike.countApproxDistinct$default$1"),
          ProblemFilters.exclude[MissingMethodProblem](
            "org.apache.spark.api.java.JavaDoubleRDD.countApproxDistinct$default$1"),
          ProblemFilters.exclude[MissingMethodProblem](
            "org.apache.spark.storage.MemoryStore.Entry")
        ) ++
        Seq(
          ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.flume.FlumeReceiver.this")
        ) ++
        Seq( // Ignore some private methods in ALS.
          // NOTE(review): "$^dateFeatures" looks like extraction garbling of the compiler-mangled
          // name "$$updateFeatures" — confirm against the actual mllib ALS bytecode before relying
          // on this filter. Preserved byte-for-byte here because filter strings are runtime data.
          ProblemFilters.exclude[MissingMethodProblem](
            "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$^dateFeatures"),
          ProblemFilters.exclude[MissingMethodProblem]( // The only public constructor is the one without arguments.
            "org.apache.spark.mllib.recommendation.ALS.this"),
          ProblemFilters.exclude[MissingMethodProblem](
            "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$$<init>$default$7"),
          ProblemFilters.exclude[IncompatibleMethTypeProblem](
            "org.apache.spark.mllib.recommendation.ALS.org$apache$spark$mllib$recommendation$ALS$^dateFeatures")
        ) ++
        MimaBuild.excludeSparkClass("mllib.linalg.distributed.ColumnStatisticsAggregator") ++
        MimaBuild.excludeSparkClass("rdd.ZippedRDD") ++
        MimaBuild.excludeSparkClass("rdd.ZippedPartition") ++
        MimaBuild.excludeSparkClass("util.SerializableHyperLogLog") ++
        MimaBuild.excludeSparkClass("storage.Values") ++
        MimaBuild.excludeSparkClass("storage.Entry") ++
        MimaBuild.excludeSparkClass("storage.MemoryStore$Entry") ++
        Seq(
          ProblemFilters.exclude[IncompatibleMethTypeProblem](
            "org.apache.spark.mllib.tree.impurity.Gini.calculate"),
          ProblemFilters.exclude[IncompatibleMethTypeProblem](
            "org.apache.spark.mllib.tree.impurity.Entropy.calculate"),
          ProblemFilters.exclude[IncompatibleMethTypeProblem](
            "org.apache.spark.mllib.tree.impurity.Variance.calculate")
        )
      case v if v.startsWith("1.0") =>
        Seq(
          MimaBuild.excludeSparkPackage("api.java"),
          MimaBuild.excludeSparkPackage("mllib"),
          MimaBuild.excludeSparkPackage("streaming")
        ) ++
        MimaBuild.excludeSparkClass("rdd.ClassTags") ++
        MimaBuild.excludeSparkClass("util.XORShiftRandom") ++
        MimaBuild.excludeSparkClass("graphx.EdgeRDD") ++
        MimaBuild.excludeSparkClass("graphx.VertexRDD") ++
        MimaBuild.excludeSparkClass("graphx.impl.GraphImpl") ++
        MimaBuild.excludeSparkClass("graphx.impl.RoutingTable") ++
        MimaBuild.excludeSparkClass("graphx.util.collection.PrimitiveKeyOpenHashMap") ++
        MimaBuild.excludeSparkClass("graphx.util.collection.GraphXPrimitiveKeyOpenHashMap") ++
        MimaBuild.excludeSparkClass("mllib.recommendation.MFDataGenerator") ++
        MimaBuild.excludeSparkClass("mllib.optimization.SquaredGradient") ++
        MimaBuild.excludeSparkClass("mllib.regression.RidgeRegressionWithSGD") ++
        MimaBuild.excludeSparkClass("mllib.regression.LassoWithSGD") ++
        MimaBuild.excludeSparkClass("mllib.regression.LinearRegressionWithSGD")
      // Unknown or future versions: no exclusions.
      case _ => Seq()
    }
}
0 commit comments