diff --git a/nifi-commons/nifi-metrics/pom.xml b/nifi-commons/nifi-metrics/pom.xml
index 84e108671e6c..41d8280e0f93 100644
--- a/nifi-commons/nifi-metrics/pom.xml
+++ b/nifi-commons/nifi-metrics/pom.xml
@@ -23,42 +23,6 @@
     <artifactId>nifi-metrics</artifactId>
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.codehaus.mojo</groupId>
-                <artifactId>build-helper-maven-plugin</artifactId>
-                <version>1.5</version>
-                <executions>
-                    <execution>
-                        <id>add-source</id>
-                        <phase>generate-sources</phase>
-                        <goals>
-                            <goal>add-source</goal>
-                        </goals>
-                        <configuration>
-                            <sources>
-                                <source>src/main/groovy</source>
-                            </sources>
-                        </configuration>
-                    </execution>
-                    <execution>
-                        <id>add-test-source</id>
-                        <phase>generate-test-sources</phase>
-                        <goals>
-                            <goal>add-test-source</goal>
-                        </goals>
-                        <configuration>
-                            <sources>
-                                <source>src/test/groovy</source>
-                            </sources>
-                        </configuration>
-                    </execution>
-                </executions>
-            </plugin>
-        </plugins>
-    </build>
-
     <dependencies>
         <dependency>
             <groupId>org.apache.nifi</groupId>
@@ -70,15 +34,5 @@
             <artifactId>metrics-jvm</artifactId>
             <version>4.1.36</version>
         </dependency>
-        <dependency>
-            <groupId>org.spockframework</groupId>
-            <artifactId>spock-core</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.codehaus.groovy</groupId>
-            <artifactId>groovy-test</artifactId>
-            <scope>test</scope>
-        </dependency>
     </dependencies>
 </project>
diff --git a/nifi-commons/nifi-metrics/src/test/groovy/org/apache/nifi/metrics/jvm/JmxJvmMetricsSpec.groovy b/nifi-commons/nifi-metrics/src/test/groovy/org/apache/nifi/metrics/jvm/JmxJvmMetricsSpec.groovy
deleted file mode 100644
index f503cf5db6d2..000000000000
--- a/nifi-commons/nifi-metrics/src/test/groovy/org/apache/nifi/metrics/jvm/JmxJvmMetricsSpec.groovy
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.metrics.jvm
-
-import org.apache.nifi.processor.DataUnit
-import spock.lang.Specification
-import spock.lang.Unroll
-
-class JmxJvmMetricsSpec extends Specification {
-
- @Unroll
- def "Get numeric metric for #metricName via method #methodName"() {
- given:
- def jmxJvmMetrics = JmxJvmMetrics.instance
-
- when:
- def metricResult = jmxJvmMetrics."$methodName"(dataUnit).doubleValue()
-
- then:
- noExceptionThrown()
- metricResult != -1
-
-
- where:
- metricName | methodName | dataUnit
- "memory.total.init" | "totalInit" | DataUnit.B
- "memory.total.used" | "totalUsed" | DataUnit.B
- "memory.total.max" | "totalMax" | DataUnit.B
- "memory.total.committed" | "totalCommitted" | DataUnit.B
- "memory.heap.init" | "heapInit" | DataUnit.B
- "memory.heap.used" | "heapUsed" | DataUnit.B
- "memory.heap.max" | "heapMax" | DataUnit.B
- "memory.heap.committed" | "heapCommitted" | DataUnit.B
- "memory.total.init" | "totalInit" | DataUnit.B
- "memory.total.init" | "totalInit" | DataUnit.B
- "memory.total.init" | "totalInit" | DataUnit.B
- }
-
- @Unroll
- def "Get percentage metric for #metricName via method #methodName"() {
- given:
- def jmxJvmMetrics = JmxJvmMetrics.instance
-
- when:
- def metricResult = jmxJvmMetrics."$methodName"()
-
- then:
- noExceptionThrown()
- metricResult instanceof Double
- metricResult != 0.0
-
- where:
- metricName | methodName
- "memory.heap.usage" | "heapUsage"
- "memory.non-heap.usage" | "nonHeapUsage"
- "os.filedescriptor.usage" | "fileDescriptorUsage"
- }
-
- def "Memory pool metric names exist"() {
- given:
- def jmxJvmMetrics = JmxJvmMetrics.instance
-
- when:
- def names = jmxJvmMetrics.getMetricNames(JmxJvmMetrics.REGISTRY_METRICSET_MEMORY + ".pools")
-
- then:
- names.size() > 0
- }
-
- @Unroll
- def "Get string map metric for #metricName via method #methodName"() {
- given:
- def jmxJvmMetrics = JmxJvmMetrics.instance
-
- when:
- def metricResult = jmxJvmMetrics."$methodName"()
-
- then:
- noExceptionThrown()
- metricResult.keySet().size() > 0
-
- where:
- metricName | methodName
- "memory.pools.usage" | "memoryPoolUsage"
- "garbage-collectors" | "garbageCollectors"
- }
-}
diff --git a/nifi-commons/nifi-site-to-site-client/pom.xml b/nifi-commons/nifi-site-to-site-client/pom.xml
index 25d539208f8f..69d80f324a6a 100644
--- a/nifi-commons/nifi-site-to-site-client/pom.xml
+++ b/nifi-commons/nifi-site-to-site-client/pom.xml
@@ -116,11 +116,6 @@
             <version>1.1.2</version>
             <scope>test</scope>
         </dependency>
-        <dependency>
-            <groupId>org.codehaus.groovy</groupId>
-            <artifactId>groovy-test</artifactId>
-            <scope>test</scope>
-        </dependency>
         <dependency>
             <groupId>javax.validation</groupId>
             <artifactId>validation-api</artifactId>
diff --git a/nifi-commons/nifi-site-to-site-client/src/test/groovy/org/apache/nifi/remote/client/PeerSelectorTest.groovy b/nifi-commons/nifi-site-to-site-client/src/test/groovy/org/apache/nifi/remote/client/PeerSelectorTest.groovy
deleted file mode 100644
index fc1c76535ab4..000000000000
--- a/nifi-commons/nifi-site-to-site-client/src/test/groovy/org/apache/nifi/remote/client/PeerSelectorTest.groovy
+++ /dev/null
@@ -1,1131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.remote.client
-
-import org.apache.nifi.remote.PeerDescription
-import org.apache.nifi.remote.PeerStatus
-import org.apache.nifi.remote.TransferDirection
-import org.apache.nifi.remote.protocol.SiteToSiteTransportProtocol
-import org.apache.nifi.remote.util.PeerStatusCache
-import org.junit.jupiter.api.BeforeEach
-import org.junit.jupiter.api.Test
-import org.slf4j.Logger
-import org.slf4j.LoggerFactory
-
-import java.util.concurrent.ArrayBlockingQueue
-
-import static org.junit.jupiter.api.Assertions.assertEquals
-import static org.junit.jupiter.api.Assertions.assertNotNull
-import static org.junit.jupiter.api.Assertions.assertNull
-import static org.junit.jupiter.api.Assertions.assertTrue
-
-class PeerSelectorTest {
- private static final Logger logger = LoggerFactory.getLogger(PeerSelectorTest.class)
-
- private static final BOOTSTRAP_PEER_DESCRIPTION = new PeerDescription("localhost", -1, false)
- private static final List<String> DEFAULT_NODES = ["node1.nifi", "node2.nifi", "node3.nifi"]
- private static final String DEFAULT_REMOTE_INSTANCE_URIS = buildRemoteInstanceUris(DEFAULT_NODES)
- private static final Set<PeerStatus> DEFAULT_PEER_STATUSES = buildPeerStatuses(DEFAULT_NODES)
- private static final Set<PeerDescription> DEFAULT_PEER_DESCRIPTIONS = DEFAULT_PEER_STATUSES*.peerDescription
- private static final Map<PeerDescription, Set<PeerStatus>> DEFAULT_PEER_NODES = buildPeersMap(DEFAULT_PEER_STATUSES)
-
- // Default collaborators
- private static mockPSP
- private static mockPP
-
- @BeforeEach
- void setUp() {
- // Mock collaborators
- mockPSP = mockPeerStatusProvider()
- mockPP = mockPeerPersistence()
- }
-
- private static String buildRemoteInstanceUris(List<String> nodes = DEFAULT_NODES) {
- String remoteInstanceUris = "http://" + nodes.join(":8443/nifi-api,http://") + ":8443/nifi-api";
- remoteInstanceUris
- }
-
- private static Set<PeerStatus> buildPeerStatuses(List<String> nodes = DEFAULT_NODES) {
- Set<PeerDescription> nodePeerDescriptions = nodes.collect { String nodeHostname ->
- new PeerDescription(nodeHostname, -1, false)
- }
-
- Set<PeerStatus> peerStatuses = nodePeerDescriptions.collect { PeerDescription pd ->
- new PeerStatus(pd, 0, true)
- }
- peerStatuses
- }
-
- /**
- * Returns a map representing the cluster architecture formed by each hostname having the provided number of flowfiles.
- *
- * @param peersWithFlowfiles a map of hostnames to flowfile counts
- * @return the map with formed objects like PeerStatus and PeerDescription
- */
- private static Map<PeerStatus, Integer> buildCluster(Map<String, Integer> peersWithFlowfiles = [:]) {
- peersWithFlowfiles.collectEntries { String hostname, Integer flowfileCount ->
- [new PeerStatus(new PeerDescription(hostname, -1, false), flowfileCount, true), flowfileCount]
- }
- }
-
- /**
- * Returns a map where each key (peer description) is aware of all of its peer nodes (peer statuses).
- *
- * @param peerStatuses the set of peer statuses
- * @return a map of PDs to sibling peers
- */
- private static Map<PeerDescription, Set<PeerStatus>> buildPeersMap(Set<PeerStatus> peerStatuses) {
- peerStatuses.collectEntries { PeerStatus ps ->
- [ps.peerDescription, peerStatuses.findAll { it.peerDescription.hostname != ps.peerDescription.hostname }]
- }
- }
-
- /**
- * Returns a map of nodes to expected percentage of flowfiles allocated to/from the node.
- *
- * @param nodes the map of nodes to current flowfile count
- * @param direction the transfer direction
- * @return the map of nodes to expected allocation
- */
- private static Map<String, Double> determineExpectedPercents(Map<String, Integer> nodes, TransferDirection direction = TransferDirection.SEND) {
- long totalFFC = nodes.values().sum() as long
- nodes.collectEntries { name, ffc ->
- [name, PeerSelector.calculateNormalizedWeight(direction, totalFFC, ffc, nodes.size())]
- }
- }
-
- /**
- * Asserts that the provided frequency results are within {@code TOLERANCE} % of the expected values.
- *
- * @param resultsFrequency the map of node to invocations/hits
- * @param EXPECTED_PERCENTS the map of node to expected percent of hits
- * @param NUM_TIMES the total number of hits (defaults to the sum of all results)
- * @param TOLERANCE the tolerance for error (default 0.05 = 5%)
- */
- private static void assertDistributionPercentages(Map<String, Integer> resultsFrequency,
- final Map<String, Double> EXPECTED_PERCENTS,
- final int NUM_TIMES = resultsFrequency.values().sum() as int,
- final double TOLERANCE = 0.05) {
- assertEquals(EXPECTED_PERCENTS.keySet(), resultsFrequency.keySet())
-
- logger.info(" Actual results: ${resultsFrequency.sort()}")
- logger.info("Expected results: ${EXPECTED_PERCENTS.sort().collect { k, v -> "${k}: ${v}%" }}")
-
- def max = resultsFrequency.max { a, b -> a.value <=> b.value }
- def min = resultsFrequency.min { a, b -> a.value <=> b.value }
- logger.info("Max: ${max.key} (${max.value}) | Min: ${min.key} (${min.value})")
- def realTolerance = TOLERANCE * NUM_TIMES
- logger.debug("Tolerance is measured as a percent of total flowfiles (${TOLERANCE * 100}% of ${NUM_TIMES} = ${realTolerance.round(2)})")
-
- // TODO: Change percentages to be percentage points of total for even comparison
- EXPECTED_PERCENTS.each { k, v ->
- def expectedCount = (v / 100) * NUM_TIMES
- def lowerBound = Math.max(0, (expectedCount - realTolerance).round(2))
- def upperBound = Math.min(NUM_TIMES, (expectedCount + realTolerance).round(2))
- def count = resultsFrequency[k]
- def difference = Math.abs(expectedCount - count) / NUM_TIMES
- logger.debug("Checking that ${count} is within ±${TOLERANCE * 100}% of ${expectedCount} (${lowerBound}, ${upperBound}) | ${(difference * 100).round(2)}%")
- assertTrue(count >= lowerBound && count <= upperBound)
- }
- }
-
- /**
- * Asserts that the last N peer selections do not have N-1 consecutive selections of the same peer, where N is the total peer count. This is a legacy requirement.
- *
- * @param recentPeerSelectionQueue the recently selected peers (the PeerQueue should have been initialized with N elements)
- * @param nextPeer the next peer
- */
- private static void assertConsecutiveSelections(PeerQueue recentPeerSelectionQueue, PeerStatus nextPeer) {
- recentPeerSelectionQueue.append(nextPeer.peerDescription.hostname)
- int consecutiveElements = recentPeerSelectionQueue.getMaxConsecutiveElements()
-// String mcce = recentPeerSelectionQueue.getMostCommonConsecutiveElement()
-// logger.debug("Most consecutive elements in recentPeerSelectionQueue: ${consecutiveElements} - ${mcce} | ${recentPeerSelectionQueue}")
- assertTrue(consecutiveElements <= recentPeerSelectionQueue.totalSize - 1)
- }
-
- private static double calculateMean(Map resultsFrequency) {
- int n = resultsFrequency.size()
- Object meanIndex = n % 2 == 0 ? (n / 2 - 1)..(n / 2) : (n / 2).intValue()
- List meanElements = resultsFrequency.values().sort()[meanIndex] as List
- return meanElements.sum() / meanElements.size()
- }
-
- private static PeerStatusProvider mockPeerStatusProvider(PeerDescription bootstrapPeerDescription = BOOTSTRAP_PEER_DESCRIPTION,
- String remoteInstanceUris = DEFAULT_REMOTE_INSTANCE_URIS,
- Map<PeerDescription, Set<PeerStatus>> peersMap = DEFAULT_PEER_NODES) {
- [getTransportProtocol : { ->
- SiteToSiteTransportProtocol.HTTP
- },
- getRemoteInstanceUris: { ->
- remoteInstanceUris
- },
- getBootstrapPeerDescription: { ->
- bootstrapPeerDescription
- },
- fetchRemotePeerStatuses : { PeerDescription pd ->
- peersMap[pd] ?: [] as Set
- }] as PeerStatusProvider
- }
-
- private static PeerPersistence mockPeerPersistence(String remoteInstanceUris = DEFAULT_REMOTE_INSTANCE_URIS, Set<PeerStatus> peerStatuses = DEFAULT_PEER_STATUSES) {
- [restore: { ->
- new PeerStatusCache(peerStatuses, System.currentTimeMillis(), remoteInstanceUris, SiteToSiteTransportProtocol.HTTP)
- },
- save : { PeerStatusCache psc ->
- }] as PeerPersistence
- }
-
- private static PeerSelector buildPeerSelectorForCluster(String scenarioName, Map<String, Integer> nodes) {
- // Map the nodes to a cluster
- def clusterMap = buildCluster(nodes)
- logger.info("Using cluster map (${scenarioName}): ${clusterMap.collectEntries { k, v -> [k.peerDescription.hostname, v] }}")
-
- // Build a peer selector with this cluster
- PeerStatusProvider mockPSP = mockPeerStatusProvider(BOOTSTRAP_PEER_DESCRIPTION, DEFAULT_REMOTE_INSTANCE_URIS, buildPeersMap(clusterMap.keySet()))
- PeerPersistence mockPP = mockPeerPersistence(DEFAULT_REMOTE_INSTANCE_URIS, clusterMap.keySet())
-
- new PeerSelector(mockPSP, mockPP)
- }
-
- @Test
- void testGetPeersToQueryShouldBeEmpty() {
- // Arrange
-
- // Mock collaborators with empty data
- mockPSP = mockPeerStatusProvider(BOOTSTRAP_PEER_DESCRIPTION, "", [:])
- mockPP = mockPeerPersistence("", [] as Set)
-
- PeerSelector ps = new PeerSelector(mockPSP, mockPP)
-
- // Act
- def peersToQuery = ps.getPeersToQuery()
- logger.info("Retrieved ${peersToQuery.size()} peers to query: ${peersToQuery}")
-
- // Assert
- assertEquals(1, peersToQuery.size())
- assertEquals(BOOTSTRAP_PEER_DESCRIPTION, peersToQuery.first())
- }
-
- @Test
- void testShouldGetPeersToQuery() {
- // Arrange
- Set<PeerStatus> restoredPeerStatuses = buildPeerStatuses()
-
- // Mock collaborators
- mockPP = mockPeerPersistence(DEFAULT_REMOTE_INSTANCE_URIS, restoredPeerStatuses)
-
- PeerSelector ps = new PeerSelector(mockPSP, mockPP)
-
- // Act
- def peersToQuery = ps.getPeersToQuery()
- logger.info("Retrieved ${peersToQuery.size()} peers to query: ${peersToQuery}")
-
- // Assert
- assertEquals(restoredPeerStatuses.size() + 1, peersToQuery.size())
- assertTrue(peersToQuery.contains(BOOTSTRAP_PEER_DESCRIPTION))
- assertTrue(peersToQuery.containsAll(DEFAULT_PEER_DESCRIPTIONS))
- }
-
- /**
- * Asserts that calling the {@code #getPeersToQuery()} method repeatedly provides the same result because it does not modify {@code lastFetchedQueryablePeers} directly.
- *
- */
- @Test
- void testGetPeersToQueryShouldBeIdempotent() {
- // Arrange
- final int NUM_TIMES = 3
-
- PeerSelector ps = new PeerSelector(mockPSP, mockPP)
-
- // Act
- def peersToQuery = ps.getPeersToQuery()
- logger.info("Retrieved ${peersToQuery.size()} peers to query: ${peersToQuery}")
-
- def repeatedPeersToQuery = []
- NUM_TIMES.times { int i ->
- repeatedPeersToQuery << ps.getPeersToQuery()
- }
-
- // Assert
- assertEquals(DEFAULT_PEER_STATUSES.size() + 1, peersToQuery.size())
- assertTrue(peersToQuery.contains(BOOTSTRAP_PEER_DESCRIPTION))
- assertTrue(peersToQuery.containsAll(DEFAULT_PEER_DESCRIPTIONS))
-
- repeatedPeersToQuery.forEach(query -> assertEquals(peersToQuery, query))
- }
-
- @Test
- void testShouldFetchRemotePeerStatuses() {
- // Arrange
- PeerSelector ps = new PeerSelector(mockPSP, mockPP)
-
- // Act
- Set<PeerStatus> remotePeerStatuses = ps.fetchRemotePeerStatuses(DEFAULT_PEER_DESCRIPTIONS)
- logger.info("Retrieved ${remotePeerStatuses.size()} peer statuses: ${remotePeerStatuses}")
-
- // Assert
- assertEquals(DEFAULT_PEER_STATUSES.size(), remotePeerStatuses.size())
- assertTrue(remotePeerStatuses.containsAll(DEFAULT_PEER_STATUSES))
- }
-
- /**
- * Iterates through test scenarios of 100, 1000, and 10_000 total flowfiles and calculates the relative send and receive weights for every percentage.
- */
- @Test
- void testShouldCalculateNormalizedWeight() {
- // Arrange
- def results = [:]
-
- // Act
- [3, 5, 7].each { int nodeCount ->
- results["$nodeCount"] = [:]
- (2..4).each { int e ->
- int totalFlowfileCount = 10**e
- results["$nodeCount"]["$totalFlowfileCount"] = [:]
- def thisScenario = results["$nodeCount"]["$totalFlowfileCount"]
- logger.info("Running ${nodeCount} node scenario for ${totalFlowfileCount} total flowfiles")
- (0..100).each { int i ->
- int flowfileCount = (i / 100 * totalFlowfileCount).intValue()
- thisScenario["$flowfileCount"] = [:]
-
- double sendWeight = PeerSelector.calculateNormalizedWeight(TransferDirection.SEND, totalFlowfileCount, flowfileCount, nodeCount)
- double receiveWeight = PeerSelector.calculateNormalizedWeight(TransferDirection.RECEIVE, totalFlowfileCount, flowfileCount, nodeCount)
-
- thisScenario["$flowfileCount"]["send"] = sendWeight
- thisScenario["$flowfileCount"]["receive"] = receiveWeight
- }
- }
- }
-
- // Assert
- results.each { nodeCount, t ->
- t.each { total, r ->
- total = Integer.valueOf(total)
- logger.info("Results for ${nodeCount} nodes with ${total} flowfiles: ")
- logger.info(["Count", "Send", "Receive"].collect { it.padLeft(10, " ") }.join())
- int step = total / 10 as int
- (0..total).step(step).each { int n ->
- def data = r["$n"]
- def line = [n, data.send, data.receive].collect { (it as String).padLeft(10, " ") }.join()
- logger.debug(line)
- }
-
- // Assert that the send percentage is always between 0% and 80%
- r.every { k, v -> assertTrue(v.send >= 0 && v.send <= 80) }
-
- // Assert that the receive percentage is always between 0% and 100%
- r.every { k, v -> assertTrue(v.receive >= 0 && v.receive <= 100) }
- }
- }
- }
-
- /**
- * Iterates through test scenarios of 100, 1000, and 10_000 total flowfiles and calculates the relative send and receive weights for every percentage.
- */
- @Test
- void testShouldCalculateNormalizedWeightForSingleRemote() {
- // Arrange
- final int NODE_COUNT = 1
-
- // Act
- (2..4).each { int e ->
- int totalFlowfileCount = 10**e
- logger.info("Running single node scenario for ${totalFlowfileCount} total flowfiles")
- (0..100).each { int i ->
- int flowfileCount = (i / 100 * totalFlowfileCount).intValue()
- double sendWeight = PeerSelector.calculateNormalizedWeight(TransferDirection.SEND, totalFlowfileCount, flowfileCount, NODE_COUNT)
- double receiveWeight = PeerSelector.calculateNormalizedWeight(TransferDirection.RECEIVE, totalFlowfileCount, flowfileCount, NODE_COUNT)
-
- // Assert
- assertEquals(100, sendWeight)
- assertEquals(100, receiveWeight)
- }
- }
- }
-
- @Test
- void testShouldBuildWeightedPeerMapForSend() {
- // Arrange
- def nodes = ["node1.nifi": 20, "node2.nifi": 30, "node3.nifi": 50]
- def clusterMap = buildCluster(nodes)
-
- // Sort the map in ascending order by value (SEND)
- clusterMap = clusterMap.sort { e1, e2 -> e1.value <=> e2.value }
- logger.info("Using cluster map: ${clusterMap.collectEntries { k, v -> [k.peerDescription.hostname, v] }}")
-
- mockPSP = mockPeerStatusProvider(BOOTSTRAP_PEER_DESCRIPTION, DEFAULT_REMOTE_INSTANCE_URIS, buildPeersMap(clusterMap.keySet()))
- mockPP = mockPeerPersistence(DEFAULT_REMOTE_INSTANCE_URIS, clusterMap.keySet())
-
- PeerSelector ps = new PeerSelector(mockPSP, mockPP)
- Set<PeerStatus> peerStatuses = ps.getPeerStatuses()
-
- // Act
- LinkedHashMap<PeerStatus, Double> weightedPeerMap = ps.buildWeightedPeerMap(peerStatuses, TransferDirection.SEND)
- logger.info("Weighted peer map: ${weightedPeerMap}")
-
- // Assert
- assertEquals(clusterMap.keySet(), weightedPeerMap.keySet())
- }
-
- @Test
- void testShouldBuildWeightedPeerMapForReceive() {
- // Arrange
- def nodes = ["node1.nifi": 20, "node2.nifi": 30, "node3.nifi": 50]
- def clusterMap = buildCluster(nodes)
-
- // Sort the map in descending order by value (RECEIVE)
- clusterMap = clusterMap.sort { e1, e2 -> e2.value <=> e1.value }
- logger.info("Using cluster map: ${clusterMap.collectEntries { k, v -> [k.peerDescription.hostname, v] }}")
-
- mockPSP = mockPeerStatusProvider(BOOTSTRAP_PEER_DESCRIPTION, DEFAULT_REMOTE_INSTANCE_URIS, buildPeersMap(clusterMap.keySet()))
- mockPP = mockPeerPersistence(DEFAULT_REMOTE_INSTANCE_URIS, clusterMap.keySet())
-
- PeerSelector ps = new PeerSelector(mockPSP, mockPP)
- Set<PeerStatus> peerStatuses = ps.getPeerStatuses()
-
- // Act
- LinkedHashMap<PeerStatus, Double> weightedPeerMap = ps.buildWeightedPeerMap(peerStatuses, TransferDirection.RECEIVE)
- logger.info("Weighted peer map: ${weightedPeerMap}")
-
- // Assert
- assertEquals(clusterMap.keySet(), weightedPeerMap.keySet())
- }
-
- /**
- * This test ensures that regardless of the total flowfile count, the resulting map has
- * normalized weights (i.e. percentage of 100).
- */
- @Test
- void testCreateDestinationMapForSendShouldBeNormalized() {
- // Arrange
- def scenarios = [
- "100 ff 100/0/0" : ["node1.nifi": 100, "node2.nifi": 0, "node3.nifi": 0],
- "100 ff 50/50/0" : ["node1.nifi": 50, "node2.nifi": 50, "node3.nifi": 0],
- "100 ff 100/0" : ["node1.nifi": 100, "node2.nifi": 0],
- "1000 ff 200/300/500": ["node1.nifi": 200, "node2.nifi": 300, "node3.nifi": 500],
- "1000 ff 333/333/334": ["node1.nifi": 333, "node2.nifi": 333, "node3.nifi": 334],
- "1000 ff 0/250x4" : ["node1.nifi": 0, "node2.nifi": 250, "node3.nifi": 250, "node4.nifi": 250, "node5.nifi": 250],
- "1000 ff 142x7" : ((1..7).collectEntries { int i -> ["node${i}.nifi", 1000.intdiv(7)] }),
- "200 ff 151/1x49" : ["node1.nifi": 151] + ((2..50).collectEntries { int i -> ["node${i}.nifi", 1] })
- ]
-
- scenarios.each { String name, Map<String, Integer> nodes ->
- PeerSelector ps = buildPeerSelectorForCluster(name, nodes)
- Set<PeerStatus> peerStatuses = ps.getPeerStatuses()
-
- // Check both SEND and RECEIVE
- TransferDirection.values().each { TransferDirection direction ->
- logger.info("Retrieving peers for ${direction} in scenario ${name}")
-
- // Act
- Map<PeerStatus, Double> destinationMap = ps.createDestinationMap(peerStatuses, direction)
- logger.info("Destination map: ${destinationMap}")
-
- // Assert
- assertEquals(peerStatuses, destinationMap.keySet())
-
- // For uneven splits, the resulting percentage should be within +/- 1%
- def totalPercentage = destinationMap.values().sum()
- assertTrue(totalPercentage >= 99 && totalPercentage <= 100)
- }
- }
- }
-
- /**
- * Test the edge case where there is a rounding error and the selected random number is not captured in the buckets
- */
- @Test
- void testGetAvailablePeerStatusShouldHandleEdgeCase() {
- // Arrange
- final int NUM_TIMES = 10000
-
- def nodes = ["node1.nifi": 2, "node2.nifi": 1, "node3.nifi": 1]
-
- // Make a map where the weights are artificially suppressed and total far less than 100% to make the edge case more likely
- Map<PeerStatus, Double> suppressedPercentageMap = buildPeerStatuses(new ArrayList(nodes.keySet())).collectEntries { [it, nodes[it.peerDescription.hostname] / 100.0 as double] }
-
- PeerSelector ps = buildPeerSelectorForCluster("edge case cluster", nodes)
-
- // Collect the results and analyze the resulting frequency distribution
- Map<String, Integer> resultsFrequency = nodes.keySet().collectEntries { [it, 0] }
-
- // Act
- NUM_TIMES.times { int i ->
- def nextPeer = ps.getAvailablePeerStatus(suppressedPercentageMap)
-// logger.debug("${(i as String).padLeft(Math.log10(NUM_TIMES).intValue())}: ${nextPeer.peerDescription.hostname}")
- resultsFrequency[nextPeer.peerDescription.hostname]++
- }
- logger.info("Peer frequency results (${NUM_TIMES}): ${resultsFrequency}")
-
- // Assert
-
- // The actual distribution would be 50/25/25
- final Map<String, Double> EXPECTED_PERCENTS = ["node1.nifi": 50.0, "node2.nifi": 25.0, "node3.nifi": 25.0]
-
- assertDistributionPercentages(resultsFrequency, EXPECTED_PERCENTS, NUM_TIMES, 0.05)
- }
-
- @Test
- void testShouldGetNextPeer() {
- // Arrange
- final int NUM_TIMES = 10000
-
- def nodes = ["node1.nifi": 20, "node2.nifi": 30, "node3.nifi": 50]
-
- // Check both SEND and RECEIVE
- TransferDirection.values().each { TransferDirection direction ->
- logger.info("Selecting ${NUM_TIMES} peers for ${direction}")
-
- PeerSelector ps = buildPeerSelectorForCluster("100 ff 20/30/50", nodes)
-
- // Collect the results and analyze the resulting frequency distribution
- Map<String, Integer> resultsFrequency = nodes.keySet().collectEntries { [it, 0] }
-
- // Act
- NUM_TIMES.times { int i ->
- def nextPeer = ps.getNextPeerStatus(direction)
-// logger.debug("${(i as String).padLeft(Math.log10(NUM_TIMES).intValue())}: ${nextPeer.peerDescription.hostname}")
- resultsFrequency[nextPeer.peerDescription.hostname]++
- }
- logger.info("Peer frequency results (${NUM_TIMES}): ${resultsFrequency}")
-
- // Assert
- final Map<String, Double> EXPECTED_PERCENTS = determineExpectedPercents(nodes, direction)
- assertDistributionPercentages(resultsFrequency, EXPECTED_PERCENTS, NUM_TIMES)
- }
- }
-
- /**
- * When the cluster is balanced, the consecutive selection of peers should not repeat {@code cluster.size() - 1} times.
- */
- @Test
- void testGetNextPeerShouldNotRepeatPeersOnBalancedCluster() {
- // Arrange
- final int NUM_TIMES = 10000
-
- def nodes = ((1..10).collectEntries { int i -> ["node${i}.nifi".toString(), 100] })
- PeerSelector ps = buildPeerSelectorForCluster("1000 ff 100x10", nodes)
-
- // Check both SEND and RECEIVE
- TransferDirection.values().each { TransferDirection direction ->
- logger.info("Selecting ${NUM_TIMES} peers for ${direction}")
-
- // Collect the results and analyze the resulting frequency distribution
- def resultsFrequency = nodes.keySet().collectEntries { [it, 0] }
-
- // Use the queue to track recent peers and observe repeated selections
- PeerQueue lastN = new PeerQueue(nodes.size())
-
- // Act
- NUM_TIMES.times { int i ->
- def nextPeer = ps.getNextPeerStatus(direction)
- resultsFrequency[nextPeer.peerDescription.hostname]++
-
- // Assert the consecutive selections are ok
- assertConsecutiveSelections(lastN, nextPeer)
- }
-
- // Assert
- final def EXPECTED_PERCENTS = nodes.collectEntries { [it.key, 10.0] }
-
- // The tolerance should be a bit higher because of the high number of nodes and even distribution
- assertDistributionPercentages(resultsFrequency, EXPECTED_PERCENTS, NUM_TIMES, 0.10)
- }
- }
-
- /**
- * When a remote has only one valid peer, that peer should be selected every time
- */
- @Test
- void testGetNextPeerShouldRepeatPeersOnSingleValidDestination() {
- // Arrange
- final int NUM_TIMES = 100
-
- // Single destination scenarios
- def scenarios = [
- "single node" : ["node1.nifi": 100],
- "single empty node": ["node1.nifi": 0],
- "100 ff 100/0" : ["node1.nifi": 100, "node2.nifi": 0],
- ]
-
- scenarios.each { String name, Map<String, Integer> nodes ->
- PeerSelector ps = buildPeerSelectorForCluster(name, nodes)
-
- // Check both SEND and RECEIVE
- TransferDirection.values().each { TransferDirection direction ->
- logger.info("Selecting ${NUM_TIMES} peers for ${direction} in scenario ${name}")
-
- // Collect the results and analyze the resulting frequency distribution
- def resultsFrequency = nodes.keySet().collectEntries { [it, 0] }
-
- // Use the queue to track recent peers and observe repeated selections
- PeerQueue lastN = new PeerQueue(nodes.size())
-
- // Act
- NUM_TIMES.times { int i ->
- def nextPeer = ps.getNextPeerStatus(direction)
- resultsFrequency[nextPeer.peerDescription.hostname]++
-
- // Assert the consecutive selections are ok (i.e. it IS selecting the same peer repeatedly)
- if (lastN.remainingCapacity() == 0) {
- lastN.remove()
- }
- lastN.put(nextPeer.peerDescription.hostname)
-
- // Spot check consecutive selection
- if (i % 10 == 0) {
- int consecutiveElements = lastN.getMaxConsecutiveElements()
- assertEquals(lastN.size(), consecutiveElements)
- }
- }
-
- // Assert
- final def EXPECTED_PERCENTS = determineExpectedPercents(nodes, direction)
- logger.info("Expected percentages for ${name}: ${EXPECTED_PERCENTS}")
-
- // The tolerance should be zero; exact matches only
- assertDistributionPercentages(resultsFrequency, EXPECTED_PERCENTS, NUM_TIMES, 0.00)
- }
- }
- }
-
- /**
- * The legacy requirement that the next peer not repeat N-1 times where N is the size of the remote cluster does not apply to the following scenarios:
- *
- * * A remote of size <= 3
- * * An unbalanced remote (33/33/33/0) should repeat the last peer multiple times
- */
- @Test
- void testGetNextPeerShouldRepeatPeersOnUnbalancedCluster() {
- // Arrange
-
- // Using a higher iteration count smooths out outliers
- final int NUM_TIMES = 10000
-
- // Scenarios where consecutively-selected peers are expected to sometimes repeat (small clusters, uneven clusters)
- def scenarios = [
- "100 ff 50/50" : ["node1.nifi": 50, "node2.nifi": 50],
- "100 ff 75/25" : ["node1.nifi": 75, "node2.nifi": 25],
- "100 ff 50/50/0" : ["node1.nifi": 50, "node2.nifi": 50, "node3.nifi": 0],
- "1000 ff 800/200/0" : ["node1.nifi": 800, "node2.nifi": 200, "node3.nifi": 0],
- "10 ff 8/2/0" : ["node1.nifi": 8, "node2.nifi": 2, "node3.nifi": 0],
- "200 ff 66x3/0" : ["node1.nifi": 66, "node2.nifi": 66, "node3.nifi": 66, "node4.nifi": 0],
- "1000 ff 0/250x4" : ["node1.nifi": 0, "node2.nifi": 250, "node3.nifi": 250, "node4.nifi": 250, "node5.nifi": 250],
- "1000 ff 0/111x9" : ["node1.nifi": 0] + ((2..10).collectEntries { ["node${it}.nifi".toString(), 111] }),
- "legacy 1024/10240/4096x3": ["node1.nifi": 1024, "node2.nifi": 10240] + (3..5).collectEntries { ["node${it}.nifi".toString(), 4096] },
- "legacy 50k/500" : ["node1.nifi": 50_000, "node2.nifi": 50],
- ]
-
- scenarios.each { String name, Map<String, Integer> nodes ->
- PeerSelector ps = buildPeerSelectorForCluster(name, nodes)
-
- // Check both SEND and RECEIVE
- TransferDirection.values().each { TransferDirection direction ->
- logger.info("Selecting ${NUM_TIMES} peers for ${direction} in scenario ${name}")
-
- // Collect the results and analyze the resulting frequency distribution
- def resultsFrequency = nodes.keySet().collectEntries { [it, 0] }
- logger.debug("Initialized results map to ${resultsFrequency}")
-
- // Use the queue to track recent peers and observe repeated selections
- PeerQueue lastN = new PeerQueue(nodes.size())
-
- // Act
- NUM_TIMES.times { int i ->
- def nextPeer = ps.getNextPeerStatus(direction)
-// logger.debug("${(i as String).padLeft(Math.log10(NUM_TIMES).intValue())}: ${nextPeer.peerDescription.hostname}")
- resultsFrequency[nextPeer.peerDescription.hostname]++
-
- // Assert the consecutive selections are ok (i.e. it IS selecting the same peer repeatedly)
- if (lastN.remainingCapacity() == 0) {
- lastN.remove()
- }
- lastN.put(nextPeer.peerDescription.hostname)
-
- int consecutiveElements = lastN.getMaxConsecutiveElements()
- if (consecutiveElements == nodes.size() && nodes.size() > 3) {
- logger.debug("Most consecutive elements in recentPeerSelectionQueue: ${consecutiveElements} | ${lastN}")
- }
- }
-
- // Assert
- final def EXPECTED_PERCENTS = determineExpectedPercents(nodes, direction)
- logger.info("Expected percentages for ${name}: ${EXPECTED_PERCENTS}")
-
- assertDistributionPercentages(resultsFrequency, EXPECTED_PERCENTS, NUM_TIMES)
- }
- }
- }
-
- /**
- * Test the edge case where peers are penalized
- */
- @Test
- void testGetAvailablePeerStatusShouldHandlePenalizedPeers() {
- // Arrange
- final int NUM_TIMES = 100
-
- // Should prefer node1, but it will be penalized
- def nodes = ["node1.nifi": 10, "node2.nifi": 90]
-
- // Make a map where the weights are normal
- def peerStatuses = buildPeerStatuses(new ArrayList(nodes.keySet()))
- Map<PeerStatus, Double> weightMap = peerStatuses.collectEntries { [it, nodes[it.peerDescription.hostname] as double] }
-
- PeerSelector ps = buildPeerSelectorForCluster("penalized peer", nodes)
-
- // Penalize node1
- ps.penalize(peerStatuses.sort().first().peerDescription, 10_000)
-
- // Collect the results and analyze the resulting frequency distribution
- Map<String, Integer> resultsFrequency = nodes.keySet().collectEntries { [it, 0] }
-
- // Act
- NUM_TIMES.times { int i ->
- def nextPeer = ps.getAvailablePeerStatus(weightMap)
-// logger.debug("${(i as String).padLeft(Math.log10(NUM_TIMES).intValue())}: ${nextPeer.peerDescription.hostname}")
- resultsFrequency[nextPeer.peerDescription.hostname]++
- }
- logger.info("Peer frequency results (${NUM_TIMES}): ${resultsFrequency}")
-
- // Assert
-
- // The actual distribution would be .9/.1, but because of the penalization, all selections will be node2
- final Map<String, Double> EXPECTED_PERCENTS = ["node1.nifi": 0.0, "node2.nifi": 100.0]
-
- // The tolerance should be very tight as this will be almost exact every time
- assertDistributionPercentages(resultsFrequency, EXPECTED_PERCENTS, NUM_TIMES, 0.00)
- }
-
- /**
- * Test the edge case where peers are penalized
- */
- @Test
- void testGetAvailablePeerStatusShouldHandleMultiplePenalizedPeers() {
- // Arrange
- final int NUM_TIMES = 10_000
-
- // Should distribute evenly, but 1/2 of the nodes will be penalized
- def nodes = ["node1.nifi": 25, "node2.nifi": 25, "node3.nifi": 25, "node4.nifi": 25]
-
- // Make a map where the weights are normal
- def peerStatuses = buildPeerStatuses(new ArrayList(nodes.keySet()))
- Map<PeerStatus, Double> weightMap = peerStatuses.collectEntries { [it, nodes[it.peerDescription.hostname] as double] }
-
- PeerSelector ps = buildPeerSelectorForCluster("penalized peers", nodes)
-
- // Penalize node1 & node3
- def penalizedPeerStatuses = peerStatuses.findAll { ["node1.nifi", "node3.nifi"].contains(it.peerDescription.hostname) }
- penalizedPeerStatuses.each { ps.penalize(it.peerDescription, 10_000) }
-
- // Collect the results and analyze the resulting frequency distribution
- Map<String, Integer> resultsFrequency = nodes.keySet().collectEntries { [it, 0] }
-
- // Act
- NUM_TIMES.times { int i ->
- def nextPeer = ps.getAvailablePeerStatus(weightMap)
-// logger.debug("${(i as String).padLeft(Math.log10(NUM_TIMES).intValue())}: ${nextPeer.peerDescription.hostname}")
- resultsFrequency[nextPeer.peerDescription.hostname]++
- }
- logger.info("Peer frequency results (${NUM_TIMES}): ${resultsFrequency}")
-
- // Assert
-
- // The actual distribution would be .25 * 4, but because of the penalization, node2 and node4 will each have ~50%
- final Map<String, Double> EXPECTED_PERCENTS = ["node1.nifi": 0.0, "node2.nifi": 50.0, "node3.nifi": 0.0, "node4.nifi": 50.0]
-
- assertDistributionPercentages(resultsFrequency, EXPECTED_PERCENTS, NUM_TIMES, 0.05)
- }
-
- // Copied legacy tests from TestPeerSelector
-
- /**
- * Test that the cache is the source of peer statuses initially
- */
- @Test
- void testInitializationShouldRestorePeerStatusFileCache() {
- // Arrange
- def nodes = DEFAULT_NODES
- def peerStatuses = DEFAULT_PEER_STATUSES
-
- // Create the peer status provider
- mockPSP = mockPeerStatusProvider()
-
- // Point to the persisted cache on disk
- final File cacheFile = File.createTempFile("peers", "txt")
- cacheFile.deleteOnExit()
-
- // Construct the cache contents and write to disk
- final String CACHE_CONTENTS = "${mockPSP.getTransportProtocol()}\n" +
- "${AbstractPeerPersistence.REMOTE_INSTANCE_URIS_PREFIX}${mockPSP.getRemoteInstanceUris()}\n" + peerStatuses.collect { PeerStatus ps ->
- [ps.peerDescription.hostname, ps.peerDescription.port, ps.peerDescription.isSecure(), ps.isQueryForPeers()].join(":")
- }.join("\n")
- cacheFile.text = CACHE_CONTENTS
-
- FilePeerPersistence filePP = new FilePeerPersistence(cacheFile)
-
- // Act
-
- // The constructor should restore the initial cache
- PeerSelector ps = new PeerSelector(mockPSP, filePP)
-
- // PeerSelector should access peer statuses from cache
- def peersToQuery = ps.getPeersToQuery()
- logger.info("Retrieved ${peersToQuery.size()} peers to query: ${peersToQuery}")
-
- // Assert
- assertEquals(nodes.size() + 1, peersToQuery.size())
- assertTrue(peersToQuery.contains(BOOTSTRAP_PEER_DESCRIPTION))
- assertTrue(peersToQuery.containsAll(DEFAULT_PEER_DESCRIPTIONS))
- }
-
- /**
- * Test that if the cache is expired, it is not used
- */
- @Test
- void testRefreshShouldHandleExpiredPeerStatusFileCache() {
- // Arrange
- def nodes = DEFAULT_NODES
- def peerStatuses = DEFAULT_PEER_STATUSES
- def remoteInstanceUris = buildRemoteInstanceUris(nodes)
-
- // Create the peer status provider with no actual remote peers
- mockPSP = mockPeerStatusProvider(BOOTSTRAP_PEER_DESCRIPTION, remoteInstanceUris, [:])
-
- // Point to the persisted cache on disk
- final File cacheFile = File.createTempFile("peers", "txt")
- cacheFile.deleteOnExit()
-
- // Construct the cache contents and write to disk
- final String CACHE_CONTENTS = "${mockPSP.getTransportProtocol()}\n" +
- "${AbstractPeerPersistence.REMOTE_INSTANCE_URIS_PREFIX}${mockPSP.getRemoteInstanceUris()}\n" + peerStatuses.collect { PeerStatus ps ->
- [ps.peerDescription.hostname, ps.peerDescription.port, ps.peerDescription.isSecure(), ps.isQueryForPeers()].join(":")
- }.join("\n")
- cacheFile.text = CACHE_CONTENTS
-
- // Mark the file as expired
- cacheFile.lastModified = System.currentTimeMillis() - (PeerSelector.PEER_CACHE_MILLIS * 2)
-
- FilePeerPersistence filePP = new FilePeerPersistence(cacheFile)
-
- // Act
-
- // The constructor should restore the initial cache
- PeerSelector ps = new PeerSelector(mockPSP, filePP)
-
- // Assert
-
- // The loaded cache should be marked as expired and not used
- assertTrue(ps.isCacheExpired(ps.peerStatusCache))
-
- // This internal method does not refresh or check expiration
- def peersToQuery = ps.getPeersToQuery()
- logger.info("Retrieved ${peersToQuery.size()} peers to query: ${peersToQuery}")
-
- // The cache has (expired) peer statuses present
- assertEquals(nodes.size() + 1, peersToQuery.size())
- assertTrue(peersToQuery.contains(BOOTSTRAP_PEER_DESCRIPTION))
- assertTrue(peersToQuery.containsAll(DEFAULT_PEER_DESCRIPTIONS))
-
- // Trigger the cache expiration detection
- ps.refresh()
-
- peersToQuery = ps.getPeersToQuery()
- logger.info("After cache expiration, retrieved ${peersToQuery.size()} peers to query: ${peersToQuery}")
-
- // The cache only contains the bootstrap node
- assertEquals(1, peersToQuery.size())
- assertTrue(peersToQuery.contains(BOOTSTRAP_PEER_DESCRIPTION))
- }
-
- Throwable generateException(String message, int nestedLevel = 0) {
- IOException e = new IOException(message)
- nestedLevel.times { int i ->
- e = new IOException("${message} ${i + 1}", e)
- }
- e
- }
-
- /**
- * Test that printing the exception does not cause an infinite loop
- */
- @Test
- void testRefreshShouldHandleExceptions() {
- // Arrange
- mockPP = [
- restore: { ->
- new PeerStatusCache([] as Set, System.currentTimeMillis(), DEFAULT_REMOTE_INSTANCE_URIS, SiteToSiteTransportProtocol.HTTP)
- },
- // Create the peer persistence to throw an exception on save
- save : { PeerStatusCache cache ->
- throw generateException("Custom error message", 3)
- }
- ] as PeerPersistence
-
- PeerSelector ps = new PeerSelector(mockPSP, mockPP)
-
- // Act
- ps.refreshPeerStatusCache()
- def peersToQuery = ps.getPeersToQuery()
-
- // Assert
- assertEquals(1, peersToQuery.size())
- assertTrue(peersToQuery.contains(BOOTSTRAP_PEER_DESCRIPTION))
- }
-
- /**
- * Test that the cache is not used if it does not match the transport protocol
- */
- @Test
- void testInitializationShouldIgnoreCacheWithWrongTransportProtocol() {
- // Arrange
- def nodes = DEFAULT_NODES
- def peerStatuses = DEFAULT_PEER_STATUSES
-
- // Create the peer status provider
- mockPSP = mockPeerStatusProvider()
-
- // Point to the persisted cache on disk
- final File cacheFile = File.createTempFile("peers", "txt")
- cacheFile.deleteOnExit()
-
- // Construct the cache contents (with wrong TP - mockPSP uses HTTP) and write to disk
- final String CACHE_CONTENTS = "${SiteToSiteTransportProtocol.RAW}\n" + peerStatuses.collect { PeerStatus ps ->
- [ps.peerDescription.hostname, ps.peerDescription.port, ps.peerDescription.isSecure(), ps.isQueryForPeers()].join(":")
- }.join("\n")
- cacheFile.text = CACHE_CONTENTS
-
- FilePeerPersistence filePP = new FilePeerPersistence(cacheFile)
-
- // Act
- PeerSelector ps = new PeerSelector(mockPSP, filePP)
-
- // The cache should be ignored because of the transport protocol mismatch
- def peersToQuery = ps.getPeersToQuery()
- logger.info("Retrieved ${peersToQuery.size()} peers to query: ${peersToQuery}")
-
- // Assert
- assertEquals(1, peersToQuery.size())
- assertTrue(peersToQuery.contains(BOOTSTRAP_PEER_DESCRIPTION))
- }
-
- /**
- * This test simulates a failure scenario of a remote NiFi cluster. It confirms that:
- *
- * - PeerSelector uses the bootstrap node to fetch remote peer statuses at the initial attempt
- * - PeerSelector uses one of query-able nodes lastly fetched successfully
- * - PeerSelector can refresh remote peer statuses even if the bootstrap node is down
- * - PeerSelector returns null as next peer when there's no peer available
- * - PeerSelector always tries to fetch peer statuses at least from the bootstrap node, so that it can
- * recover when the node gets back online
- *
- */
- @Test
- void testShouldFetchRemotePeerStatusesInFailureScenario() throws IOException {
- // Arrange
- int currentAttempt = 1
-
- // The bootstrap node is node1.nifi
- List<String> nodes = ["node1.nifi", "node2.nifi"]
- Set<PeerStatus> peerStatuses = buildPeerStatuses(nodes)
-
- // Need references to the bootstrap and node2 later
- PeerStatus bootstrapStatus = peerStatuses.find { it.peerDescription.hostname == "node1.nifi" }
- PeerDescription bootstrapDescription = bootstrapStatus.peerDescription
-
- PeerStatus node2Status = peerStatuses.find { it.peerDescription.hostname == "node2.nifi" }
- PeerDescription node2Description = node2Status.peerDescription
-
- String remoteInstanceUris = buildRemoteInstanceUris(nodes)
-
- // Mock the PSP
- mockPSP = [
- getTransportProtocol : { ->
- SiteToSiteTransportProtocol.HTTP
- },
- getRemoteInstanceUris: { ->
- remoteInstanceUris
- },
- getBootstrapPeerDescription: { ->
- bootstrapDescription
- },
- fetchRemotePeerStatuses : { PeerDescription pd ->
- switch (currentAttempt) {
- case 1:
- return [bootstrapStatus, node2Status] as Set
- case 2..3:
- return [node2Status] as Set
- case 4:
- return [] as Set
- default:
- return [bootstrapStatus] as Set
- }
- }
- ] as PeerStatusProvider
-
- // Mock the PP with only these statuses
- mockPP = mockPeerPersistence(remoteInstanceUris, peerStatuses)
-
- PeerSelector ps = new PeerSelector(mockPSP, mockPP)
- ps.refresh()
- PeerStatus peerStatus = ps.getNextPeerStatus(TransferDirection.RECEIVE)
- logger.info("Attempt ${currentAttempt} - ${peerStatus}")
- assertNotNull(peerStatus)
-
- // Force the selector to refresh the cache
- currentAttempt++
- ps.refreshPeerStatusCache()
-
- // Attempt 2 & 3 - only node2 available (PSP will only return node2)
- 2.times {
- ps.refresh()
- peerStatus = ps.getNextPeerStatus(TransferDirection.RECEIVE)
- logger.info("Attempt ${currentAttempt} - ${peerStatus}")
- assertEquals(node2Status, peerStatus)
-
- // Force the selector to refresh the cache
- currentAttempt++
- ps.refreshPeerStatusCache()
- }
-
- // Attempt 4 - no available nodes
- ps.refresh()
- peerStatus = ps.getNextPeerStatus(TransferDirection.RECEIVE)
- logger.info("Attempt ${currentAttempt} - ${peerStatus}")
- assertNull(peerStatus)
-
- // Force the selector to refresh the cache
- currentAttempt = 5
- ps.refreshPeerStatusCache()
-
- // Attempt 5 - bootstrap node available
- ps.refresh()
- peerStatus = ps.getNextPeerStatus(TransferDirection.RECEIVE)
- logger.info("Attempt ${currentAttempt} - ${peerStatus}")
- assertEquals(bootstrapStatus, peerStatus)
- }
-
- // PeerQueue definition and tests
-
- /**
- * Tests the utility class {@link PeerQueue} used to track consecutive peer selection.
- */
- @Test
- void testPeerQueueShouldGetMaxConsecutiveElements() {
- // Arrange
- PeerQueue peerQueue = new PeerQueue(10)
- List<String> nodes = (1..5).collect { "node${it}.nifi".toString() }
- List<PeerStatus> peerStatuses = new ArrayList<>(buildPeerStatuses(nodes))
-
- // Act
-
- // Same node every time
- 100.times { int i ->
- peerQueue.append(nodes.first())
-
- // Assert
- assertEquals(peerQueue.size(), peerQueue.getMaxConsecutiveElements())
- }
-
- // Never repeating node
- peerQueue.clear()
- 100.times { int i ->
- peerQueue.append(nodes.get(i % peerStatuses.size()))
-
- // Assert
- assertEquals(1, peerQueue.getMaxConsecutiveElements())
- }
-
- // Repeat up to nodes.size() times but no more
- peerQueue.clear()
- 100.times { int i ->
- // Puts the first node unless this is a multiple of the node count
- peerQueue.append((i % nodes.size() == 0) ? nodes.last() : nodes.first())
-
- // Assert
-// logger.debug("Most consecutive elements in queue: ${peerQueue.getMaxConsecutiveElements()} | ${peerQueue}")
- assertTrue(peerQueue.getMaxConsecutiveElements() <= peerStatuses.size())
- }
- }
-
- class PeerQueue extends ArrayBlockingQueue {
- PeerQueue(int capacity) {
- super(capacity)
- }
-
- int getTotalSize() {
- this.size() + this.remainingCapacity()
- }
-
- int getMaxConsecutiveElements() {
- int currentMax = 1, current = 1
- def iterator = this.iterator()
- Object prev = iterator.next()
- while (iterator.hasNext()) {
- def curr = iterator.next()
- if (prev == curr) {
- current++
- if (current > currentMax) {
- currentMax = current
- }
- } else {
- current = 1
- }
- prev = curr
- }
- return currentMax
- }
-
- Object getMostFrequentElement() {
- def map = this.groupBy { it }
- map.max { a, b -> a.value.size() <=> b.value.size() }.key
- }
-
- Object getMostCommonConsecutiveElement() {
- int currentMax = 1, current = 1
- def iterator = this.iterator()
- Object prev = iterator.next()
- Object mcce = prev
- while (iterator.hasNext()) {
- def curr = iterator.next()
- if (prev == curr) {
- current++
- if (current > currentMax) {
- currentMax = current
- mcce = curr
- }
- } else {
- current = 1
- }
- prev = curr
- }
- return mcce
- }
-
- /**
- * Adds the new Object to the tail of the queue. If the queue was full before, removes the head to open capacity.
- *
- * @param o the object to append
- */
- void append(Object o) {
- if (this.remainingCapacity() == 0) {
- this.remove()
- }
- this.put(o)
- }
- }
-}
\ No newline at end of file
diff --git a/nifi-nar-bundles/nifi-cassandra-bundle/nifi-cassandra-distributedmapcache-service/pom.xml b/nifi-nar-bundles/nifi-cassandra-bundle/nifi-cassandra-distributedmapcache-service/pom.xml
index 9f2c300c1f7a..0983e5c1d299 100644
--- a/nifi-nar-bundles/nifi-cassandra-bundle/nifi-cassandra-distributedmapcache-service/pom.xml
+++ b/nifi-nar-bundles/nifi-cassandra-bundle/nifi-cassandra-distributedmapcache-service/pom.xml
@@ -57,13 +57,6 @@
             <groupId>com.fasterxml.jackson.core</groupId>
             <artifactId>jackson-databind</artifactId>
         </dependency>
-        <dependency>
-            <groupId>org.codehaus.groovy</groupId>
-            <artifactId>groovy-all</artifactId>
-            <type>pom</type>
-            <scope>test</scope>
-        </dependency>
-
         <dependency>
             <groupId>org.apache.nifi</groupId>
             <artifactId>nifi-mock</artifactId>
diff --git a/nifi-nar-bundles/nifi-cassandra-bundle/nifi-cassandra-distributedmapcache-service/src/test/groovy/org/apache/nifi/CassandraDistributedMapCacheIT.groovy b/nifi-nar-bundles/nifi-cassandra-bundle/nifi-cassandra-distributedmapcache-service/src/test/groovy/org/apache/nifi/CassandraDistributedMapCacheIT.groovy
deleted file mode 100644
index 025312e35f05..000000000000
--- a/nifi-nar-bundles/nifi-cassandra-bundle/nifi-cassandra-distributedmapcache-service/src/test/groovy/org/apache/nifi/CassandraDistributedMapCacheIT.groovy
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi
-
-import com.datastax.driver.core.Cluster
-import com.datastax.driver.core.Session
-import org.apache.nifi.controller.cassandra.CassandraDistributedMapCache
-import org.apache.nifi.distributed.cache.client.Deserializer
-import org.apache.nifi.distributed.cache.client.Serializer
-import org.apache.nifi.processor.AbstractProcessor
-import org.apache.nifi.processor.ProcessContext
-import org.apache.nifi.processor.ProcessSession
-import org.apache.nifi.processor.exception.ProcessException
-import org.apache.nifi.service.CassandraSessionProvider
-import org.apache.nifi.util.TestRunner
-import org.apache.nifi.util.TestRunners
-import org.junit.jupiter.api.AfterAll
-import org.junit.jupiter.api.BeforeAll
-import org.junit.jupiter.api.Test
-import org.testcontainers.containers.CassandraContainer
-import org.testcontainers.junit.jupiter.Container
-import org.testcontainers.junit.jupiter.Testcontainers
-import org.testcontainers.utility.DockerImageName
-
-import static org.junit.jupiter.api.Assertions.assertEquals
-import static org.junit.jupiter.api.Assertions.assertFalse
-import static org.junit.jupiter.api.Assertions.assertTrue
-
-/**
- * Setup instructions:
- *
- * docker run -p 7000:7000 -p 9042:9042 --name cassandra --restart always -d cassandra:3
- *
- * docker exec -it cassandra cqlsh
- *
- * Keyspace CQL: create keyspace nifi_test with replication = { 'replication_factor': 1, 'class': 'SimpleStrategy' } ;
- *
- * Table SQL: create table dmc (id blob, value blob, primary key(id));
- */
-@Testcontainers
-class CassandraDistributedMapCacheIT {
- @Container
- static final CassandraContainer CASSANDRA_CONTAINER = new CassandraContainer(DockerImageName.parse("cassandra:4.1"))
- static TestRunner runner
- static CassandraDistributedMapCache distributedMapCache
- static Session session
-
- static final String KEYSPACE = "sample_keyspace"
-
- @BeforeAll
- static void setup() {
- runner = TestRunners.newTestRunner(new AbstractProcessor() {
- @Override
- void onTrigger(ProcessContext processContext, ProcessSession processSession) throws ProcessException {
-
- }
- })
- distributedMapCache = new CassandraDistributedMapCache()
-
- InetSocketAddress contactPoint = CASSANDRA_CONTAINER.getContactPoint()
- String connectionString = String.format("%s:%d", contactPoint.getHostName(), contactPoint.getPort())
-
- Cluster cluster = Cluster.builder().addContactPoint(contactPoint.getHostName())
- .withPort(contactPoint.getPort()).build();
- session = cluster.connect();
-
- session.execute("create keyspace nifi_test with replication = { 'replication_factor': 1, 'class': 'SimpleStrategy' }");
- session.execute("create table nifi_test.dmc (id blob, value blob, primary key(id))");
-
- def cassandraService = new CassandraSessionProvider()
- runner.addControllerService("provider", cassandraService)
- runner.addControllerService("dmc", distributedMapCache)
- runner.setProperty(cassandraService, CassandraSessionProvider.CONTACT_POINTS, connectionString)
- runner.setProperty(cassandraService, CassandraSessionProvider.KEYSPACE, "nifi_test")
- runner.setProperty(distributedMapCache, CassandraDistributedMapCache.SESSION_PROVIDER, "provider")
- runner.setProperty(distributedMapCache, CassandraDistributedMapCache.TABLE_NAME, "dmc")
- runner.setProperty(distributedMapCache, CassandraDistributedMapCache.KEY_FIELD_NAME, "id")
- runner.setProperty(distributedMapCache, CassandraDistributedMapCache.VALUE_FIELD_NAME, "value")
- runner.setProperty(distributedMapCache, CassandraDistributedMapCache.TTL, "5 sec")
- runner.enableControllerService(cassandraService)
- runner.enableControllerService(distributedMapCache)
- runner.assertValid()
-
- session = cassandraService.getCassandraSession();
- session.execute("""
- INSERT INTO dmc (id, value) VALUES(textAsBlob('contains-key'), textAsBlob('testvalue'))
- """)
- session.execute("""
- INSERT INTO dmc (id, value) VALUES(textAsBlob('delete-key'), textAsBlob('testvalue'))
- """)
- session.execute("""
- INSERT INTO dmc (id, value) VALUES(textAsBlob('get-and-put-key'), textAsBlob('testvalue'))
- """)
- }
-
- @AfterAll
- static void cleanup() {
- session.execute("TRUNCATE nifi_test.dmc")
- }
-
- Serializer<String> serializer = { str, os ->
- os.write(str.bytes)
- } as Serializer
-
- Deserializer<String> deserializer = { input ->
- new String(input)
- } as Deserializer
-
- @Test
- void testContainsKey() {
- assertTrue(distributedMapCache.containsKey("contains-key", serializer))
- }
-
- @Test
- void testGetAndPutIfAbsent() {
- String result = distributedMapCache.getAndPutIfAbsent('get-and-put-key', 'testing', serializer, serializer, deserializer)
- assertEquals("testvalue", result)
- }
-
- @Test
- void testRemove() {
- distributedMapCache.remove("delete-key", serializer)
- }
-
- @Test
- void testGet() {
- String result = distributedMapCache.get("contains-key", serializer, deserializer)
- assertEquals("testvalue", result)
- }
-
- @Test
- void testPut() {
- distributedMapCache.put("put-key", "sometestdata", serializer, serializer)
- Thread.sleep(1000)
- assertTrue(distributedMapCache.containsKey("put-key", serializer))
- }
-
- @Test
- void testPutIfAbsent() {
- assertTrue(distributedMapCache.putIfAbsent("put-if-absent-key", "testingthis", serializer, serializer))
- assertFalse(distributedMapCache.putIfAbsent("put-if-absent-key", "testingthis", serializer, serializer))
- }
-}
diff --git a/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-mock-record-utils/pom.xml b/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-mock-record-utils/pom.xml
index 353e909d093a..85c5664b15d4 100644
--- a/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-mock-record-utils/pom.xml
+++ b/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-mock-record-utils/pom.xml
@@ -53,40 +53,4 @@
             <scope>test</scope>
         </dependency>
     </dependencies>
-
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.codehaus.mojo</groupId>
-                <artifactId>build-helper-maven-plugin</artifactId>
-                <version>1.5</version>
-                <executions>
-                    <execution>
-                        <id>add-source</id>
-                        <phase>generate-sources</phase>
-                        <goals>
-                            <goal>add-source</goal>
-                        </goals>
-                        <configuration>
-                            <sources>
-                                <source>src/main/groovy</source>
-                            </sources>
-                        </configuration>
-                    </execution>
-                    <execution>
-                        <id>add-test-source</id>
-                        <phase>generate-test-sources</phase>
-                        <goals>
-                            <goal>add-test-source</goal>
-                        </goals>
-                        <configuration>
-                            <sources>
-                                <source>src/test/groovy</source>
-                            </sources>
-                        </configuration>
-                    </execution>
-                </executions>
-            </plugin>
-        </plugins>
-    </build>
 </project>
diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/pom.xml b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/pom.xml
index 994488d755c4..820373bfd9fd 100644
--- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/pom.xml
+++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/pom.xml
@@ -162,22 +162,6 @@
             <artifactId>nifi-mock</artifactId>
             <scope>test</scope>
         </dependency>
-
-        <dependency>
-            <groupId>org.spockframework</groupId>
-            <artifactId>spock-core</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>cglib</groupId>
-            <artifactId>cglib-nodep</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.codehaus.groovy</groupId>
-            <artifactId>groovy-test</artifactId>
-        </dependency>
diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/StandardHttpResponseMapperSpec.groovy b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/StandardHttpResponseMapperSpec.groovy
deleted file mode 100644
index a80cb0f1ef55..000000000000
--- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/StandardHttpResponseMapperSpec.groovy
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.cluster.coordination.http
-
-import com.fasterxml.jackson.annotation.JsonInclude
-import com.fasterxml.jackson.databind.ObjectMapper
-import com.fasterxml.jackson.module.jaxb.JaxbAnnotationIntrospector
-import org.apache.nifi.cluster.manager.NodeResponse
-import org.apache.nifi.cluster.protocol.NodeIdentifier
-import org.apache.nifi.util.NiFiProperties
-import org.apache.nifi.web.api.dto.ConnectionDTO
-import org.apache.nifi.web.api.dto.ControllerConfigurationDTO
-import org.apache.nifi.web.api.dto.FunnelDTO
-import org.apache.nifi.web.api.dto.LabelDTO
-import org.apache.nifi.web.api.dto.PermissionsDTO
-import org.apache.nifi.web.api.dto.status.ConnectionStatusDTO
-import org.apache.nifi.web.api.dto.status.ConnectionStatusSnapshotDTO
-import org.apache.nifi.web.api.entity.ConnectionEntity
-import org.apache.nifi.web.api.entity.ConnectionsEntity
-import org.apache.nifi.web.api.entity.ControllerConfigurationEntity
-import org.apache.nifi.web.api.entity.FunnelEntity
-import org.apache.nifi.web.api.entity.FunnelsEntity
-import org.apache.nifi.web.api.entity.LabelEntity
-import org.apache.nifi.web.api.entity.LabelsEntity
-import spock.lang.Specification
-import spock.lang.Unroll
-
-import javax.ws.rs.core.Response
-import java.text.NumberFormat
-
-@Unroll
-class StandardHttpResponseMapperSpec extends Specification {
-
- def setup() {
- def propFile = StandardHttpResponseMapperSpec.class.getResource("/conf/nifi.properties").getFile()
- System.setProperty NiFiProperties.PROPERTIES_FILE_PATH, propFile
- }
-
- def cleanup() {
- System.clearProperty NiFiProperties.PROPERTIES_FILE_PATH
- }
-
- def "MergeResponses: mixed HTTP GET response statuses, expecting #expectedStatus"() {
- given:
- def responseMapper = new StandardHttpResponseMapper(NiFiProperties.createBasicNiFiProperties(null,null))
- def requestUri = new URI('http://server/resource')
- def requestId = UUID.randomUUID().toString()
- def Map<Response, Map<String, Integer>> mockToRequestEntity = [:]
- def nodeResponseSet = nodeResponseData.collect {
- int n = it.node
- def response = Mock(Response)
- mockToRequestEntity.put response, it
- new NodeResponse(new NodeIdentifier("cluster-node-$n", 'addr', n, 'sktaddr', n * 10, 'sktaddr', n * 10, 'stsaddr', n * 100, n * 1000, false, null), "get", requestUri, response, 500L,
- requestId)
- } as Set
-
- when:
- def returnedResponse = responseMapper.mapResponses(requestUri, 'get', nodeResponseSet, true).getStatus()
-
- then:
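- // stub each mocked Response to return the HTTP status recorded for its node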
- mockToRequestEntity.entrySet().forEach {
- Response response = it.key
- _ * response.getStatus() >> it.value.status
- }
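- // Spock's "0 * _" asserts that no interactions beyond those declared above occurred on any mock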
- 0 * _
- returnedResponse == expectedStatus
-
- where:
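- // each row is one unrolled case: the per-node response statuses and the status the merged response must carry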
- nodeResponseData || expectedStatus
- [[node: 1, status: 200], [node: 2, status: 200], [node: 3, status: 401]] as Set || 401
- [[node: 1, status: 200], [node: 2, status: 200], [node: 3, status: 403]] as Set || 403
- [[node: 1, status: 200], [node: 2, status: 403], [node: 3, status: 500]] as Set || 403
- [[node: 1, status: 200], [node: 2, status: 200], [node: 3, status: 500]] as Set || 500
- }
-
- def "MergeResponses: #responseEntities.size() HTTP 200 #httpMethod responses for #requestUriPart"() {
- given: "json serialization setup"
- def mapper = new ObjectMapper();
- mapper.setDefaultPropertyInclusion(JsonInclude.Value.construct(JsonInclude.Include.NON_NULL, JsonInclude.Include.ALWAYS));
- mapper.setAnnotationIntrospector(new JaxbAnnotationIntrospector(mapper.getTypeFactory()));
-
- and: "setup of the data to be used in the test"
- def responseMerger = new StandardHttpResponseMapper(NiFiProperties.createBasicNiFiProperties(null,null))
- def requestUri = new URI("http://server/$requestUriPart")
- def requestId = UUID.randomUUID().toString()
- def Map<Response, Object> mockToRequestEntity = [:]
- def n = 0
- def nodeResponseSet = responseEntities.collect {
- ++n
- def response = Mock(Response)
- mockToRequestEntity.put response, it
- new NodeResponse(new NodeIdentifier("cluster-node-$n", 'addr', n, 'sktaddr', n * 10, 'sktaddr', n * 11, 'stsaddr', n * 100, n * 1000, false, null), "get", requestUri, response, 500L,
- requestId)
- } as Set
-
- when:
- def returnedResponse = responseMerger.mapResponses(requestUri, httpMethod, nodeResponseSet, true)
-
- then:
- mockToRequestEntity.entrySet().forEach {
- Response response = it.key
- def entity = it.value
- _ * response.getStatus() >> 200
- 1 * response.readEntity(_) >> entity
- }
- responseEntities.size() == mockToRequestEntity.size()
- 0 * _
- def returnedJson = mapper.writeValueAsString(returnedResponse.getUpdatedEntity())
- def expectedJson = mapper.writeValueAsString(expectedEntity)
- returnedJson == expectedJson
-
- where:
- requestUriPart | httpMethod | responseEntities ||
- expectedEntity
- 'nifi-api/controller/config' | 'get' | [
- new ControllerConfigurationEntity(permissions: new PermissionsDTO(canRead: true, canWrite: true),
- component: new ControllerConfigurationDTO(maxTimerDrivenThreadCount: 10)),
- new ControllerConfigurationEntity(permissions: new PermissionsDTO(canRead: true, canWrite: false),
- component: new ControllerConfigurationDTO(maxTimerDrivenThreadCount: 10)),
- new ControllerConfigurationEntity(permissions: new PermissionsDTO(canRead: true, canWrite: true),
- component: new ControllerConfigurationDTO(maxTimerDrivenThreadCount: 10))] ||
- // expectedEntity
- new ControllerConfigurationEntity(permissions: new PermissionsDTO(canRead: true, canWrite: false),
- component: new ControllerConfigurationDTO(maxTimerDrivenThreadCount: 10))
- 'nifi-api/controller/config' | 'put' | [
- new ControllerConfigurationEntity(permissions: new PermissionsDTO(canRead: true, canWrite: true),
- component: new ControllerConfigurationDTO(maxTimerDrivenThreadCount: 10)),
- new ControllerConfigurationEntity(permissions: new PermissionsDTO(canRead: true, canWrite: false),
- component: new ControllerConfigurationDTO(maxTimerDrivenThreadCount: 10)),
- new ControllerConfigurationEntity(permissions: new PermissionsDTO(canRead: true, canWrite: true),
- component: new ControllerConfigurationDTO(maxTimerDrivenThreadCount: 10))] ||
- // expectedEntity
- new ControllerConfigurationEntity(permissions: new PermissionsDTO(canRead: true, canWrite: false),
- component: new ControllerConfigurationDTO(maxTimerDrivenThreadCount: 10))
- "nifi-api/process-groups/${UUID.randomUUID()}/connections" | 'get' | [
- new ConnectionsEntity(connections: [new ConnectionEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), status: new
- ConnectionStatusDTO(aggregateSnapshot: new ConnectionStatusSnapshotDTO(bytesIn: 300)), component: new ConnectionDTO())] as Set),
- new ConnectionsEntity(connections: [new ConnectionEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false), status: new
- ConnectionStatusDTO(aggregateSnapshot: new ConnectionStatusSnapshotDTO(bytesIn: 100)))] as Set),
- new ConnectionsEntity(connections: [new ConnectionEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), status: new
- ConnectionStatusDTO(aggregateSnapshot: new ConnectionStatusSnapshotDTO(bytesIn: 500)), component: new ConnectionDTO())] as Set)] ||
- // expectedEntity
- new ConnectionsEntity(connections: [new ConnectionEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false),
- status: new ConnectionStatusDTO(aggregateSnapshot: new ConnectionStatusSnapshotDTO(bytesIn: 900,
- input: '0 (900 bytes)', output: '0 (0 bytes)', queued: '0 (0 bytes)', queuedSize: '0 bytes', queuedCount: 0)))] as Set)
- "nifi-api/process-groups/${UUID.randomUUID()}/connections" | 'post' | [
- new ConnectionEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), status:
- new ConnectionStatusDTO(aggregateSnapshot: new ConnectionStatusSnapshotDTO(bytesIn: 300)), component: new ConnectionDTO()),
- new ConnectionEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false), status:
- new ConnectionStatusDTO(aggregateSnapshot: new ConnectionStatusSnapshotDTO(bytesIn: 300))),
- new ConnectionEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), status:
- new ConnectionStatusDTO(aggregateSnapshot: new ConnectionStatusSnapshotDTO(bytesIn: 300)), component: new ConnectionDTO())] ||
- // expectedEntity
- new ConnectionEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false),
- status: new ConnectionStatusDTO(aggregateSnapshot: new ConnectionStatusSnapshotDTO(bytesIn: 900, input: '0 (900 bytes)',
- output: '0 (0 bytes)', queued: '0 (0 bytes)', queuedSize: '0 bytes', queuedCount: 0)))
- "nifi-api/connections/${UUID.randomUUID()}" | 'get' | [
- new ConnectionEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), status:
- new ConnectionStatusDTO(aggregateSnapshot: new ConnectionStatusSnapshotDTO(bytesIn: 400)), component: new ConnectionDTO()),
- new ConnectionEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false), status:
- new ConnectionStatusDTO(aggregateSnapshot: new ConnectionStatusSnapshotDTO(bytesIn: 300))),
- new ConnectionEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), status:
- new ConnectionStatusDTO(aggregateSnapshot: new ConnectionStatusSnapshotDTO(bytesIn: 300)), component: new ConnectionDTO())] ||
- // expectedEntity
- new ConnectionEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false),
- status: new ConnectionStatusDTO(aggregateSnapshot: new ConnectionStatusSnapshotDTO(bytesIn: 1000,
- input: "0 (${NumberFormat.instance.format(1000)} bytes)", output: '0 (0 bytes)', queued: '0 (0 bytes)', queuedSize: '0 bytes', queuedCount: 0)))
- "nifi-api/process-groups/${UUID.randomUUID()}/labels" | 'get' | [
- new LabelsEntity(labels: [new LabelEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), component: new LabelDTO())] as Set),
- new LabelsEntity(labels: [new LabelEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false))] as Set),
- new LabelsEntity(labels: [new LabelEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), component: new LabelDTO())] as Set)] ||
- // expectedEntity
- new LabelsEntity(labels: [new LabelEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false))] as Set)
- "nifi-api/process-groups/${UUID.randomUUID()}/labels" | 'post' | [
- new LabelEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), component: new LabelDTO()),
- new LabelEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false)),
- new LabelEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), component: new LabelDTO())] ||
- // expectedEntity
- new LabelEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false))
- "nifi-api/labels/${UUID.randomUUID()}" | 'get' | [
- new LabelEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), component: new LabelDTO()),
- new LabelEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false)),
- new LabelEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), component: new LabelDTO())] ||
- // expectedEntity
- new LabelEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false))
- "nifi-api/process-groups/${UUID.randomUUID()}/funnels" | 'get' | [
- new FunnelsEntity(funnels: [new FunnelEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), component: new FunnelDTO())] as Set),
- new FunnelsEntity(funnels: [new FunnelEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false))] as Set),
- new FunnelsEntity(funnels: [new FunnelEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), component: new FunnelDTO())] as Set)] ||
- // expectedEntity
- new FunnelsEntity(funnels: [new FunnelEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false))] as Set)
- "nifi-api/process-groups/${UUID.randomUUID()}/funnels" | 'post' | [
- new FunnelEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), component: new FunnelDTO()),
- new FunnelEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false)),
- new FunnelEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), component: new FunnelDTO())] ||
- // expectedEntity
- new FunnelEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false))
- "nifi-api/funnels/${UUID.randomUUID()}" | 'get' | [
- new FunnelEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), component: new FunnelDTO()),
- new FunnelEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false)),
- new FunnelEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), component: new FunnelDTO())] ||
- // expectedEntity
- new FunnelEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false))
- }
-}
diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/endpoints/StatusHistoryEndpointMergerSpec.groovy b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/endpoints/StatusHistoryEndpointMergerSpec.groovy
deleted file mode 100644
index 104e69b29b13..000000000000
--- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/endpoints/StatusHistoryEndpointMergerSpec.groovy
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.cluster.coordination.http.endpoints
-
-import com.fasterxml.jackson.annotation.JsonInclude
-import com.fasterxml.jackson.databind.ObjectMapper
-import com.fasterxml.jackson.module.jaxb.JaxbAnnotationIntrospector
-import org.apache.nifi.cluster.manager.NodeResponse
-import org.apache.nifi.cluster.protocol.NodeIdentifier
-import org.apache.nifi.util.NiFiProperties
-import org.apache.nifi.web.api.dto.status.StatusHistoryDTO
-import org.apache.nifi.web.api.entity.StatusHistoryEntity
-import spock.lang.Specification
-import spock.lang.Unroll
-
-import javax.ws.rs.core.Response
-
-class StatusHistoryEndpointMergerSpec extends Specification {
-
- def setup() {
- def propFile = StatusHistoryEndpointMergerSpec.class.getResource("/conf/nifi.properties").getFile()
- System.setProperty NiFiProperties.PROPERTIES_FILE_PATH, propFile
- }
-
- def cleanup() {
- System.clearProperty NiFiProperties.PROPERTIES_FILE_PATH
- }
-
- @Unroll
- def "Merge component details based on permission"() {
- given: "json serialization setup"
- def mapper = new ObjectMapper();
- mapper.setDefaultPropertyInclusion(JsonInclude.Value.construct(JsonInclude.Include.NON_NULL, JsonInclude.Include.ALWAYS));
- mapper.setAnnotationIntrospector(new JaxbAnnotationIntrospector(mapper.getTypeFactory()));
-
- and: "setup of the data to be used in the test"
- def merger = new StatusHistoryEndpointMerger(2)
- def requestUri = new URI("http://server/$requestUriPart")
- def requestId = UUID.randomUUID().toString()
- def Map<Response, Object> mockToRequestEntity = [:]
- def n = 0
- def nodeResponseSet = responseEntities.collect {
- ++n
- def response = Mock(Response)
- mockToRequestEntity.put response, it
- new NodeResponse(new NodeIdentifier("cluster-node-$n", 'addr', n, 'sktaddr', n * 10, null, n * 10, 'stsaddr', n * 100, n * 1000, false, null),
- "GET", requestUri, response, 500L, requestId)
- } as Set
-
- when:
- def returnedResponse = merger.merge(requestUri, httpMethod, nodeResponseSet, [] as Set, nodeResponseSet[0])
-
- then:
- mockToRequestEntity.entrySet().forEach {
- Response response = it.key
- def entity = it.value
- _ * response.getStatus() >> 200
- 1 * response.readEntity(_) >> entity
- }
- responseEntities.size() == mockToRequestEntity.size()
- 0 * _
- (returnedResponse.getUpdatedEntity() as StatusHistoryEntity).canRead == expectedEntity.canRead
- (returnedResponse.getUpdatedEntity() as StatusHistoryEntity).statusHistory.componentDetails == expectedEntity.statusHistory.componentDetails
-
- where:
- requestUriPart | httpMethod | responseEntities ||
- expectedEntity
- "/nifi-api/flow/connections/${UUID.randomUUID()}/status/history" | 'get' | [
- new StatusHistoryEntity(canRead: true, statusHistory: new StatusHistoryDTO(componentDetails: [key1: 'real', key2: 'real'], nodeSnapshots: [], aggregateSnapshots: [])),
- new StatusHistoryEntity(canRead: false, statusHistory: new StatusHistoryDTO(componentDetails: [key1: 'hidden', key2: 'hidden'], nodeSnapshots: [], aggregateSnapshots: [])),
- new StatusHistoryEntity(canRead: true, statusHistory: new StatusHistoryDTO(componentDetails: [key1: 'real', key2: 'real'], nodeSnapshots: [], aggregateSnapshots: []))
- ] ||
- new StatusHistoryEntity(canRead: false, statusHistory: new StatusHistoryDTO(componentDetails: [key1: 'hidden', key2: 'hidden'], nodeSnapshots: [], aggregateSnapshots: []))
- }
-}
diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/ConnectionEntityMergerSpec.groovy b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/ConnectionEntityMergerSpec.groovy
deleted file mode 100644
index 83d301b1a3ea..000000000000
--- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/ConnectionEntityMergerSpec.groovy
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.cluster.manager
-
-import com.fasterxml.jackson.annotation.JsonInclude
-import com.fasterxml.jackson.databind.ObjectMapper
-import com.fasterxml.jackson.module.jaxb.JaxbAnnotationIntrospector
-import org.apache.nifi.cluster.protocol.NodeIdentifier
-import org.apache.nifi.web.api.dto.ConnectionDTO
-import org.apache.nifi.web.api.dto.PermissionsDTO
-import org.apache.nifi.web.api.dto.status.ConnectionStatusDTO
-import org.apache.nifi.web.api.dto.status.ConnectionStatusSnapshotDTO
-import org.apache.nifi.web.api.entity.ConnectionEntity
-import spock.lang.Specification
-import spock.lang.Unroll
-
-class ConnectionEntityMergerSpec extends Specification {
-
- @Unroll
- def "Merge"() {
- given:
- def mapper = new ObjectMapper();
- mapper.setDefaultPropertyInclusion(JsonInclude.Value.construct(JsonInclude.Include.NON_NULL, JsonInclude.Include.ALWAYS));
- mapper.setAnnotationIntrospector(new JaxbAnnotationIntrospector(mapper.getTypeFactory()));
-
- def entity = nodeEntityMap.entrySet().first().value
-
- when:
- new ConnectionEntityMerger().merge(entity, nodeEntityMap)
-
- then:
- def mergedEntityJson = mapper.writeValueAsString(entity)
- def expectedJson = mapper.writeValueAsString(expectedMergedEntity)
- mergedEntityJson == expectedJson
-
- where:
- nodeEntityMap ||
- expectedMergedEntity
- [(createNodeIdentifier(1)): new ConnectionEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), status: new
- ConnectionStatusDTO(aggregateSnapshot: new ConnectionStatusSnapshotDTO(bytesIn: 300)), component: new ConnectionDTO()),
- (createNodeIdentifier(2)): new ConnectionEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false), status: new
- ConnectionStatusDTO(aggregateSnapshot: new ConnectionStatusSnapshotDTO(bytesIn: 100))),
- (createNodeIdentifier(3)): new ConnectionEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), status: new
- ConnectionStatusDTO(aggregateSnapshot: new ConnectionStatusSnapshotDTO(bytesIn: 500)), component: new ConnectionDTO())] ||
- new ConnectionEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false),
- status: new ConnectionStatusDTO(aggregateSnapshot: new ConnectionStatusSnapshotDTO(bytesIn: 900, input: '0 (900 bytes)',
- output: '0 (0 bytes)', queued: '0 (0 bytes)', queuedSize: '0 bytes', queuedCount: 0)))
-
- }
-
- def createNodeIdentifier(int id) {
- new NodeIdentifier("cluster-node-$id", 'addr', id, 'sktaddr', id * 10, null, id * 10, 'stsaddr', id * 100, id * 1000, false, null)
- }
-}
diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/ControllerServiceEntityMergerSpec.groovy b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/ControllerServiceEntityMergerSpec.groovy
deleted file mode 100644
index a240e362f3ab..000000000000
--- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/ControllerServiceEntityMergerSpec.groovy
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.cluster.manager
-
-import com.fasterxml.jackson.annotation.JsonInclude
-import com.fasterxml.jackson.databind.ObjectMapper
-import com.fasterxml.jackson.module.jaxb.JaxbAnnotationIntrospector
-import org.apache.nifi.cluster.protocol.NodeIdentifier
-import org.apache.nifi.controller.service.ControllerServiceState
-import org.apache.nifi.web.api.dto.ControllerServiceDTO
-import org.apache.nifi.web.api.dto.ControllerServiceReferencingComponentDTO
-import org.apache.nifi.web.api.dto.PermissionsDTO
-import org.apache.nifi.web.api.entity.ControllerServiceEntity
-import org.apache.nifi.web.api.entity.ControllerServiceReferencingComponentEntity
-import spock.lang.Specification
-import spock.lang.Unroll
-
-@Unroll
-class ControllerServiceEntityMergerSpec extends Specification {
- def "MergeComponents"() {
- def mapper = new ObjectMapper();
- mapper.setDefaultPropertyInclusion(JsonInclude.Value.construct(JsonInclude.Include.NON_NULL, JsonInclude.Include.ALWAYS));
- mapper.setAnnotationIntrospector(new JaxbAnnotationIntrospector(mapper.getTypeFactory()));
-
- def entity = nodeEntityMap.entrySet().first().value
-
- when:
- new ControllerServiceEntityMerger().merge(entity, nodeEntityMap)
-
- then:
- def mergedEntityJson = mapper.writeValueAsString(entity)
- def expectedJson = mapper.writeValueAsString(expectedMergedEntity)
- mergedEntityJson == expectedJson
-
- where:
- nodeEntityMap ||
- expectedMergedEntity
-
- // Simple ControllerServiceEntity merging
- [(createNodeIdentifier(1)): new ControllerServiceEntity(id: '1',
- permissions: new PermissionsDTO(canRead: true, canWrite: true),
- operatePermissions: new PermissionsDTO(canRead: false, canWrite: true),
- component: new ControllerServiceDTO()),
-
- (createNodeIdentifier(2)): new ControllerServiceEntity(id: '1',
- permissions: new PermissionsDTO(canRead: false, canWrite: false),
- operatePermissions: new PermissionsDTO(canRead: false, canWrite: true),
- component: new ControllerServiceDTO()),
-
- (createNodeIdentifier(3)): new ControllerServiceEntity(id: '1',
- permissions: new PermissionsDTO(canRead: true, canWrite: true),
- operatePermissions: new PermissionsDTO(canRead: false, canWrite: false),
- component: new ControllerServiceDTO())] ||
-
- new ControllerServiceEntity(id: '1',
- permissions: new PermissionsDTO(canRead: false, canWrite: false),
- operatePermissions: new PermissionsDTO(canRead: false, canWrite: false))
-
-
- // Controller Reference merging for canRead==false
- [(createNodeIdentifier(1)): new ControllerServiceEntity(id: '1',
- permissions: new PermissionsDTO(canRead: true, canWrite: true),
- operatePermissions: new PermissionsDTO(canRead: false, canWrite: true),
- component: new ControllerServiceDTO(referencingComponents: [new ControllerServiceReferencingComponentEntity(
- permissions: new PermissionsDTO(canRead: true, canWrite: true),
- operatePermissions: new PermissionsDTO(canRead: false, canWrite: true),
- component: new ControllerServiceReferencingComponentDTO(activeThreadCount: 1, state: ControllerServiceState.ENABLING.name()))])),
-
- (createNodeIdentifier(2)): new ControllerServiceEntity(id: '1',
- permissions: new PermissionsDTO(canRead: true, canWrite: true),
- operatePermissions: new PermissionsDTO(canRead: false, canWrite: true),
- component: new ControllerServiceDTO(referencingComponents: [new ControllerServiceReferencingComponentEntity(
- permissions: new PermissionsDTO(canRead: false, canWrite: false),
- operatePermissions: new PermissionsDTO(canRead: false, canWrite: false),
- component: new ControllerServiceReferencingComponentDTO(activeThreadCount: 1, state: ControllerServiceState.ENABLING.name()))])),
-
- (createNodeIdentifier(3)): new ControllerServiceEntity(id: '1',
- permissions: new PermissionsDTO(canRead: true, canWrite: true),
- operatePermissions: new PermissionsDTO(canRead: false, canWrite: true),
- component: new ControllerServiceDTO(referencingComponents: [new ControllerServiceReferencingComponentEntity(
- permissions: new PermissionsDTO(canRead: true, canWrite: true),
- operatePermissions: new PermissionsDTO(canRead: false, canWrite: true),
- component: new ControllerServiceReferencingComponentDTO(activeThreadCount: 1, state: ControllerServiceState.ENABLING.name()))]))] ||
-
- new ControllerServiceEntity(id: '1',
- permissions: new PermissionsDTO(canRead: true, canWrite: true),
- operatePermissions: new PermissionsDTO(canRead: false, canWrite: true),
- bulletins: [],
- component: new ControllerServiceDTO(validationErrors: [], validationStatus: "VALID",
- referencingComponents: [new ControllerServiceReferencingComponentEntity(
- permissions: new PermissionsDTO(canRead: false, canWrite: false),
- operatePermissions: new PermissionsDTO(canRead: false, canWrite: false))]))
-
-
- // Controller Reference merging for canRead==true
- [(createNodeIdentifier(1)): new ControllerServiceEntity(id: '1',
- permissions: new PermissionsDTO(canRead: true, canWrite: true),
- operatePermissions: new PermissionsDTO(canRead: false, canWrite: true),
- bulletins: [],
- component: new ControllerServiceDTO(referencingComponents: [new ControllerServiceReferencingComponentEntity(
- permissions: new PermissionsDTO(canRead: true, canWrite: true),
- operatePermissions: new PermissionsDTO(canRead: false, canWrite: true),
- component: new ControllerServiceReferencingComponentDTO(activeThreadCount: 1, state: ControllerServiceState.ENABLING.name()))])),
-
- (createNodeIdentifier(2)): new ControllerServiceEntity(id: '1',
- permissions: new PermissionsDTO(canRead: true, canWrite: true),
- operatePermissions: new PermissionsDTO(canRead: false, canWrite: true),
- bulletins: [],
- component: new ControllerServiceDTO(referencingComponents: [new ControllerServiceReferencingComponentEntity(
- permissions: new PermissionsDTO(canRead: true, canWrite: true),
- operatePermissions: new PermissionsDTO(canRead: false, canWrite: true),
- component: new ControllerServiceReferencingComponentDTO(activeThreadCount: 1, state: ControllerServiceState.ENABLING.name()))])),
-
- (createNodeIdentifier(3)): new ControllerServiceEntity(id: '1',
- permissions: new PermissionsDTO(canRead: true, canWrite: true),
- operatePermissions: new PermissionsDTO(canRead: false, canWrite: true),
- bulletins: [],
- component: new ControllerServiceDTO(referencingComponents: [new ControllerServiceReferencingComponentEntity(
- permissions: new PermissionsDTO(canRead: true, canWrite: true),
- operatePermissions: new PermissionsDTO(canRead: false, canWrite: true),
- component: new ControllerServiceReferencingComponentDTO(activeThreadCount: 1, state: ControllerServiceState.ENABLING.name()))]))] ||
-
- new ControllerServiceEntity(id: '1',
- permissions: new PermissionsDTO(canRead: true, canWrite: true),
- operatePermissions: new PermissionsDTO(canRead: false, canWrite: true),
- bulletins: [],
- component: new ControllerServiceDTO(validationErrors: [], validationStatus: "VALID",
- referencingComponents: [new ControllerServiceReferencingComponentEntity(
- permissions: new PermissionsDTO(canRead: true, canWrite: true),
- operatePermissions: new PermissionsDTO(canRead: false, canWrite: true),
- bulletins: [],
- component: new ControllerServiceReferencingComponentDTO(activeThreadCount: 3, state: ControllerServiceState.ENABLING.name()))]))
- }
-
- def "MergeControllerServiceReferences"() {
-
- }
-
- def createNodeIdentifier(int id) {
- new NodeIdentifier("cluster-node-$id", 'addr', id, 'sktaddr', id * 10, null, id * 10, 'stsaddr', id * 100, id * 1000, false, null)
- }
-}
diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/LabelEntityMergerSpec.groovy b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/LabelEntityMergerSpec.groovy
deleted file mode 100644
index 0a485b268b8a..000000000000
--- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/LabelEntityMergerSpec.groovy
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.cluster.manager
-
-import com.fasterxml.jackson.annotation.JsonInclude
-import com.fasterxml.jackson.databind.ObjectMapper
-import com.fasterxml.jackson.module.jaxb.JaxbAnnotationIntrospector
-import org.apache.nifi.cluster.protocol.NodeIdentifier
-import org.apache.nifi.web.api.dto.LabelDTO
-import org.apache.nifi.web.api.dto.PermissionsDTO
-import org.apache.nifi.web.api.entity.LabelEntity
-import spock.lang.Specification
-import spock.lang.Unroll
-
-@Unroll
-class LabelEntityMergerSpec extends Specification {
- def "Merge"() {
- given:
- def mapper = new ObjectMapper();
- mapper.setDefaultPropertyInclusion(JsonInclude.Value.construct(JsonInclude.Include.NON_NULL, JsonInclude.Include.ALWAYS));
- mapper.setAnnotationIntrospector(new JaxbAnnotationIntrospector(mapper.getTypeFactory()));
-
- def entity = nodeEntityMap.entrySet().first().value
-
- when:
- new LabelEntityMerger().merge(entity, nodeEntityMap)
-
- then:
- def mergedEntityJson = mapper.writeValueAsString(entity)
- def expectedJson = mapper.writeValueAsString(expectedMergedEntity)
- mergedEntityJson == expectedJson
-
- where:
- nodeEntityMap ||
- expectedMergedEntity
- [(createNodeIdentifier(1)): new LabelEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), component: new LabelDTO(label: 'label')),
- (createNodeIdentifier(2)): new LabelEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false)),
- (createNodeIdentifier(3)): new LabelEntity(id: '1', permissions: new PermissionsDTO(canRead: true, canWrite: true), component: new LabelDTO(label: 'label'))] ||
- new LabelEntity(id: '1', permissions: new PermissionsDTO(canRead: false, canWrite: false))
-
- }
-
- def createNodeIdentifier(int id) {
- new NodeIdentifier("cluster-node-$id", 'addr', id, 'sktaddr', id * 10, null, id * 10, 'stsaddr', id * 100, id * 1000, false, null)
- }
-}
diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/PermissionBasedStatusMergerSpec.groovy b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/PermissionBasedStatusMergerSpec.groovy
deleted file mode 100644
index f45c888d40b6..000000000000
--- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/PermissionBasedStatusMergerSpec.groovy
+++ /dev/null
@@ -1,325 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.cluster.manager
-
-import com.fasterxml.jackson.annotation.JsonInclude
-import com.fasterxml.jackson.databind.ObjectMapper
-import com.fasterxml.jackson.module.jaxb.JaxbAnnotationIntrospector
-import org.apache.nifi.web.api.dto.status.ConnectionStatusDTO
-import org.apache.nifi.web.api.dto.status.ConnectionStatusSnapshotDTO
-import org.apache.nifi.web.api.dto.status.PortStatusDTO
-import org.apache.nifi.web.api.dto.status.PortStatusSnapshotDTO
-import org.apache.nifi.web.api.dto.status.ProcessGroupStatusDTO
-import org.apache.nifi.web.api.dto.status.ProcessGroupStatusSnapshotDTO
-import org.apache.nifi.web.api.dto.status.ProcessorStatusDTO
-import org.apache.nifi.web.api.dto.status.ProcessorStatusSnapshotDTO
-import org.apache.nifi.web.api.dto.status.RemoteProcessGroupStatusDTO
-import org.apache.nifi.web.api.dto.status.RemoteProcessGroupStatusSnapshotDTO
-import spock.lang.Specification
-import spock.lang.Unroll
-
-@Unroll
-class PermissionBasedStatusMergerSpec extends Specification {
- def "Merge ConnectionStatusDTO"() {
- given:
- def mapper = new ObjectMapper();
- mapper.setDefaultPropertyInclusion(JsonInclude.Value.construct(JsonInclude.Include.NON_NULL, JsonInclude.Include.ALWAYS));
- mapper.setAnnotationIntrospector(new JaxbAnnotationIntrospector(mapper.getTypeFactory()));
-
- def merger = new StatusMerger()
-
- when:
- merger.merge(target, targetCanRead, toMerge, toMergeCanRead, 'nodeid', 'nodeaddress', 1234)
-
- then:
- def returnedJson = mapper.writeValueAsString(target)
- def expectedJson = mapper.writeValueAsString(expectedDto)
- returnedJson == expectedJson
-
- where:
- target | targetCanRead |
- toMerge | toMergeCanRead ||
- expectedDto
- new ConnectionStatusDTO(groupId: 'real', id: 'real', name: 'real', sourceId: 'real', sourceName: 'real', destinationId: 'real', destinationName: 'real') | true |
- new ConnectionStatusDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', sourceId: 'hidden', sourceName: 'hidden', destinationId: 'hidden',
- destinationName: 'hidden') | false ||
- new ConnectionStatusDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', sourceId: 'hidden', sourceName: 'hidden', destinationId: 'hidden',
- destinationName: 'hidden')
- new ConnectionStatusDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', sourceId: 'hidden', sourceName: 'hidden', destinationId: 'hidden', destinationName: 'hidden') | false |
- new ConnectionStatusDTO(groupId: 'real', id: 'real', name: 'real', sourceId: 'real', sourceName: 'real', destinationId: 'real', destinationName: 'real') | true ||
- new ConnectionStatusDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', sourceId: 'hidden', sourceName: 'hidden', destinationId: 'hidden', destinationName: 'hidden')
- }
-
- def "Merge ConnectionStatusSnapshotDTO"() {
- given:
- def mapper = new ObjectMapper();
- mapper.setDefaultPropertyInclusion(JsonInclude.Value.construct(JsonInclude.Include.NON_NULL, JsonInclude.Include.ALWAYS));
- mapper.setAnnotationIntrospector(new JaxbAnnotationIntrospector(mapper.getTypeFactory()));
-
- def merger = new StatusMerger()
-
- when:
- merger.merge(target, targetCanRead, toMerge, toMergeCanRead)
-
- then:
- def returnedJson = mapper.writeValueAsString(target)
- def expectedJson = mapper.writeValueAsString(expectedDto)
- returnedJson == expectedJson
-
- where:
- target | targetCanRead |
- toMerge | toMergeCanRead ||
- expectedDto
- new ConnectionStatusSnapshotDTO(groupId: 'real', id: 'real', name: 'real', sourceId: 'real', sourceName: 'real', destinationId: 'real', destinationName: 'real') | true |
- new ConnectionStatusSnapshotDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', sourceId: 'hidden', sourceName: 'hidden', destinationId: 'hidden',
- destinationName: 'hidden') | false ||
- new ConnectionStatusSnapshotDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', sourceId: 'hidden', sourceName: 'hidden', destinationId: 'hidden',
- destinationName: 'hidden', input: '0 (0 bytes)', output: '0 (0 bytes)', queued: '0 (0 bytes)', queuedSize: '0 bytes', queuedCount: '0')
- new ConnectionStatusSnapshotDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', sourceId: 'hidden', sourceName: 'hidden', destinationId: 'hidden',
- destinationName: 'hidden') | false |
- new ConnectionStatusSnapshotDTO(groupId: 'real', id: 'real', name: 'real', sourceId: 'real', sourceName: 'real', destinationId: 'real', destinationName: 'real') | true ||
- new ConnectionStatusSnapshotDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', sourceId: 'hidden', sourceName: 'hidden', destinationId: 'hidden',
- destinationName: 'hidden', input: '0 (0 bytes)', output: '0 (0 bytes)', queued: '0 (0 bytes)', queuedSize: '0 bytes', queuedCount: '0')
- }
-
- def "Merge PortStatusDTO"() {
- given:
- def mapper = new ObjectMapper();
- mapper.setDefaultPropertyInclusion(JsonInclude.Value.construct(JsonInclude.Include.NON_NULL, JsonInclude.Include.ALWAYS));
- mapper.setAnnotationIntrospector(new JaxbAnnotationIntrospector(mapper.getTypeFactory()));
-
- def merger = new StatusMerger()
-
- when:
- merger.merge(target, targetCanRead, toMerge, toMergeCanRead, 'nodeid', 'nodeaddress', 1234)
-
- then:
- def returnedJson = mapper.writeValueAsString(target)
- def expectedJson = mapper.writeValueAsString(expectedDto)
- returnedJson == expectedJson
-
- where:
- target | targetCanRead |
- toMerge | toMergeCanRead ||
- expectedDto
- new PortStatusDTO(groupId: 'real', id: 'real', name: 'real', transmitting: 'false') | true |
- new PortStatusDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', transmitting: 'false') | false ||
- new PortStatusDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', transmitting: 'false')
- new PortStatusDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', transmitting: 'false') | false |
- new PortStatusDTO(groupId: 'real', id: 'real', name: 'real', transmitting: 'false') | true ||
- new PortStatusDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', transmitting: 'false')
- }
-
- def "Merge PortStatusSnapshotDTO"() {
- given:
- def mapper = new ObjectMapper();
- mapper.setDefaultPropertyInclusion(JsonInclude.Value.construct(JsonInclude.Include.NON_NULL, JsonInclude.Include.ALWAYS));
- mapper.setAnnotationIntrospector(new JaxbAnnotationIntrospector(mapper.getTypeFactory()));
-
- def merger = new StatusMerger()
-
- when:
- merger.merge(target, targetCanRead, toMerge, toMergeCanRead)
-
- then:
- def returnedJson = mapper.writeValueAsString(target)
- def expectedJson = mapper.writeValueAsString(expectedDto)
- returnedJson == expectedJson
-
- where:
- target | targetCanRead |
- toMerge | toMergeCanRead ||
- expectedDto
- new PortStatusSnapshotDTO(groupId: 'real', id: 'real', name: 'real') | true |
- new PortStatusSnapshotDTO(groupId: 'hidden', id: 'hidden', name: 'hidden') | false ||
- new PortStatusSnapshotDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', input: '0 (0 bytes)', output: '0 (0 bytes)', transmitting: false)
- new PortStatusSnapshotDTO(groupId: 'hidden', id: 'hidden', name: 'hidden') | false |
- new PortStatusSnapshotDTO(groupId: 'real', id: 'real', name: 'real') | true ||
- new PortStatusSnapshotDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', input: '0 (0 bytes)', output: '0 (0 bytes)', transmitting: false)
- }
-
- def "Merge ProcessGroupStatusDTO"() {
- given:
- def mapper = new ObjectMapper();
- mapper.setDefaultPropertyInclusion(JsonInclude.Value.construct(JsonInclude.Include.NON_NULL, JsonInclude.Include.ALWAYS));
- mapper.setAnnotationIntrospector(new JaxbAnnotationIntrospector(mapper.getTypeFactory()));
-
- def merger = new StatusMerger()
-
- when:
- merger.merge(target, targetCanRead, toMerge, toMergeCanRead, 'nodeid', 'nodeaddress', 1234)
-
- then:
- def returnedJson = mapper.writeValueAsString(target)
- def expectedJson = mapper.writeValueAsString(expectedDto)
- returnedJson == expectedJson
-
- where:
- target | targetCanRead |
- toMerge | toMergeCanRead ||
- expectedDto
- new ProcessGroupStatusDTO(id: 'real', name: 'real') | true | new ProcessGroupStatusDTO(id: 'hidden', name: 'hidden') | false ||
- new ProcessGroupStatusDTO(id: 'hidden', name: 'hidden')
- new ProcessGroupStatusDTO(id: 'hidden', name: 'hidden') | false | new ProcessGroupStatusDTO(id: 'real', name: 'real') | true ||
- new ProcessGroupStatusDTO(id: 'hidden', name: 'hidden')
- }
-
- def "Merge ProcessGroupStatusSnapshotDTO"() {
- given:
- def mapper = new ObjectMapper();
- mapper.setDefaultPropertyInclusion(JsonInclude.Value.construct(JsonInclude.Include.NON_NULL, JsonInclude.Include.ALWAYS));
- mapper.setAnnotationIntrospector(new JaxbAnnotationIntrospector(mapper.getTypeFactory()));
-
- def merger = new StatusMerger()
-
- when:
- merger.merge(target, targetCanRead, toMerge, toMergeCanRead)
-
- then:
- def returnedJson = mapper.writeValueAsString(target)
- def expectedJson = mapper.writeValueAsString(expectedDto)
- returnedJson == expectedJson
-
- where:
- target | targetCanRead |
- toMerge | toMergeCanRead ||
- expectedDto
- new ProcessGroupStatusSnapshotDTO(id: 'real', name: 'real') | true | new ProcessGroupStatusSnapshotDTO(id: 'hidden', name: 'hidden') | false ||
- new ProcessGroupStatusSnapshotDTO(id: 'hidden', name: 'hidden', input: '0 (0 bytes)', output: '0 (0 bytes)', transferred: '0 (0 bytes)', read: '0 bytes', written: '0' +
- ' bytes',
- queued: '0 (0 bytes)', queuedSize: '0 bytes', queuedCount: '0', received: '0 (0 bytes)', sent: '0 (0 bytes)', connectionStatusSnapshots: [], inputPortStatusSnapshots: [],
- outputPortStatusSnapshots: [], processorStatusSnapshots: [], remoteProcessGroupStatusSnapshots: [])
- new ProcessGroupStatusSnapshotDTO(id: 'hidden', name: 'hidden') | false | new ProcessGroupStatusSnapshotDTO(id: 'real', name: 'real') | true ||
- new ProcessGroupStatusSnapshotDTO(id: 'hidden', name: 'hidden', input: '0 (0 bytes)', output: '0 (0 bytes)', transferred: '0 (0 bytes)', read: '0 bytes', written: '0 bytes',
- queued: '0 (0 bytes)', queuedSize: '0 bytes', queuedCount: '0', received: '0 (0 bytes)', sent: '0 (0 bytes)', connectionStatusSnapshots: [], inputPortStatusSnapshots: [],
- outputPortStatusSnapshots: [], processorStatusSnapshots: [], remoteProcessGroupStatusSnapshots: [])
- }
-
- def "Merge ProcessorStatusDTO"() {
- given:
- def mapper = new ObjectMapper();
- mapper.setDefaultPropertyInclusion(JsonInclude.Value.construct(JsonInclude.Include.NON_NULL, JsonInclude.Include.ALWAYS));
- mapper.setAnnotationIntrospector(new JaxbAnnotationIntrospector(mapper.getTypeFactory()));
-
- def merger = new StatusMerger()
-
- when:
- merger.merge(target, targetCanRead, toMerge, toMergeCanRead, 'nodeid', 'nodeaddress', 1234)
-
- then:
- def returnedJson = mapper.writeValueAsString(target)
- def expectedJson = mapper.writeValueAsString(expectedDto)
- returnedJson == expectedJson
-
- where:
- target | targetCanRead |
- toMerge | toMergeCanRead ||
- expectedDto
- new ProcessorStatusDTO(groupId: 'real', id: 'real', name: 'real', type: 'real') | true |
- new ProcessorStatusDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', type: 'hidden') | false ||
- new ProcessorStatusDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', type: 'hidden')
- new ProcessorStatusDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', type: 'hidden') | false |
- new ProcessorStatusDTO(groupId: 'real', id: 'real', name: 'real', type: 'real') | true ||
- new ProcessorStatusDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', type: 'hidden')
- }
-
- def "Merge ProcessorStatusSnapshotDTO"() {
- given:
- def mapper = new ObjectMapper();
- mapper.setDefaultPropertyInclusion(JsonInclude.Value.construct(JsonInclude.Include.NON_NULL, JsonInclude.Include.ALWAYS));
- mapper.setAnnotationIntrospector(new JaxbAnnotationIntrospector(mapper.getTypeFactory()));
-
- def merger = new StatusMerger()
-
- when:
- merger.merge(target, targetCanRead, toMerge, toMergeCanRead)
-
- then:
- def returnedJson = mapper.writeValueAsString(target)
- def expectedJson = mapper.writeValueAsString(expectedDto)
- returnedJson == expectedJson
-
- where:
- target | targetCanRead |
- toMerge | toMergeCanRead ||
- expectedDto
- new ProcessorStatusSnapshotDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', type: 'hidden') | false |
- new ProcessorStatusSnapshotDTO(groupId: 'real', id: 'real', name: 'real', type: 'real') | true ||
- new ProcessorStatusSnapshotDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', type: 'hidden', input: '0 (0 bytes)', output: '0 (0 bytes)', read: '0 bytes',
- written: '0 bytes', tasks: '0', tasksDuration: '00:00:00.000')
- new ProcessorStatusSnapshotDTO(groupId: 'real', id: 'real', name: 'real', type: 'real') | true |
- new ProcessorStatusSnapshotDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', type: 'hidden') | false ||
- new ProcessorStatusSnapshotDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', type: 'hidden', input: '0 (0 bytes)', output: '0 (0 bytes)', read: '0 bytes',
- written: '0 bytes', tasks: '0', tasksDuration: '00:00:00.000')
- }
-
- def "Merge RemoteProcessGroupStatusDTO"() {
- given:
- def mapper = new ObjectMapper();
- mapper.setDefaultPropertyInclusion(JsonInclude.Value.construct(JsonInclude.Include.NON_NULL, JsonInclude.Include.ALWAYS));
- mapper.setAnnotationIntrospector(new JaxbAnnotationIntrospector(mapper.getTypeFactory()));
-
- def merger = new StatusMerger()
-
- when:
- merger.merge(target, targetCanRead, toMerge, toMergeCanRead, 'nodeid', 'nodeaddress', 1234)
-
- then:
- def returnedJson = mapper.writeValueAsString(target)
- def expectedJson = mapper.writeValueAsString(expectedDto)
- returnedJson == expectedJson
-
- where:
- target | targetCanRead |
- toMerge | toMergeCanRead ||
- expectedDto
- new RemoteProcessGroupStatusDTO(groupId: 'real', id: 'real', name: 'real', targetUri: 'real') | true |
- new RemoteProcessGroupStatusDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', targetUri: 'hidden') | false ||
- new RemoteProcessGroupStatusDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', targetUri: 'hidden')
- new RemoteProcessGroupStatusDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', targetUri: 'hidden') | false |
- new RemoteProcessGroupStatusDTO(groupId: 'real', id: 'real', name: 'real', targetUri: 'real') | true ||
- new RemoteProcessGroupStatusDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', targetUri: 'hidden')
- }
-
- def "Merge RemoteProcessGroupStatusSnapshotDTO"() {
- given:
- def mapper = new ObjectMapper();
- mapper.setDefaultPropertyInclusion(JsonInclude.Value.construct(JsonInclude.Include.NON_NULL, JsonInclude.Include.ALWAYS));
- mapper.setAnnotationIntrospector(new JaxbAnnotationIntrospector(mapper.getTypeFactory()));
-
- def merger = new StatusMerger()
-
- when:
- merger.merge(target, targetCanRead, toMerge, toMergeCanRead)
-
- then:
- def returnedJson = mapper.writeValueAsString(target)
- def expectedJson = mapper.writeValueAsString(expectedDto)
- returnedJson == expectedJson
-
- where:
- target | targetCanRead |
- toMerge | toMergeCanRead ||
- expectedDto
- new RemoteProcessGroupStatusSnapshotDTO(groupId: 'real', id: 'real', name: 'real', targetUri: 'real') | true |
- new RemoteProcessGroupStatusSnapshotDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', targetUri: 'hidden') | false ||
- new RemoteProcessGroupStatusSnapshotDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', targetUri: 'hidden', received: '0 (0 bytes)', sent: '0 (0 bytes)')
- new RemoteProcessGroupStatusSnapshotDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', targetUri: 'hidden') | false |
- new RemoteProcessGroupStatusSnapshotDTO(groupId: 'real', id: 'real', name: 'real', targetUri: 'real') | true ||
- new RemoteProcessGroupStatusSnapshotDTO(groupId: 'hidden', id: 'hidden', name: 'hidden', targetUri: 'hidden', received: '0 (0 bytes)', sent: '0 (0 bytes)')
- }
-}
diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/pom.xml b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/pom.xml
index 5d596ca4f355..775f1d3a4f4c 100644
--- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/pom.xml
+++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/pom.xml
@@ -268,16 +268,6 @@
             <version>2.0.0-SNAPSHOT</version>
             <scope>test</scope>
-        <dependency>
-            <groupId>org.spockframework</groupId>
-            <artifactId>spock-core</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>cglib</groupId>
-            <artifactId>cglib-nodep</artifactId>
-            <scope>test</scope>
-        </dependency>
             <groupId>org.eclipse.jgit</groupId>
             <artifactId>org.eclipse.jgit</artifactId>
@@ -299,12 +289,6 @@
             <version>2.0.0-SNAPSHOT</version>
             <scope>test</scope>
-        <dependency>
-            <groupId>org.codehaus.groovy</groupId>
-            <artifactId>groovy-dateutil</artifactId>
-            <version>${nifi.groovy.version}</version>
-            <scope>test</scope>
-        </dependency>
             <groupId>io.netty</groupId>
             <artifactId>netty-handler</artifactId>
diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/groovy/org/apache/nifi/controller/PositionScalerSpec.groovy b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/groovy/org/apache/nifi/controller/PositionScalerSpec.groovy
deleted file mode 100644
index c7e0dd85a37a..000000000000
--- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/groovy/org/apache/nifi/controller/PositionScalerSpec.groovy
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.controller
-
-import org.apache.nifi.connectable.Connectable
-import org.apache.nifi.connectable.Position
-import org.apache.nifi.connectable.Positionable
-import spock.lang.Specification
-import spock.lang.Unroll
-
-class PositionScalerSpec extends Specification {
-
- @Unroll
- def "scale #positionableType.getSimpleName()"() {
- given:
- def positionable = Mock positionableType
-
- when:
- PositionScaler.scale positionable, factorX, factorY
-
- then:
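- // expect exactly one read of the original position and one write carrying the scaled coordinates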
- 1 * positionable.position >> new Position(originalX, originalY)
- 1 * positionable.setPosition(_) >> { Position p ->
- assert p.x == newX
- assert p.y == newY
- }
-
- where:
- positionableType | originalX | originalY | factorX | factorY | newX | newY
- Connectable | 10 | 10 | 1.5 | 1.5 | 15 | 15
- Positionable | -10 | -10 | 1.5 | 1.5 | -15 | -15
- }
-
- //TODO Test scaling of a ProcessGroup
-}
\ No newline at end of file
diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/groovy/org/apache/nifi/controller/queue/clustered/partition/NonLocalPartitionPartitionerSpec.groovy b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/groovy/org/apache/nifi/controller/queue/clustered/partition/NonLocalPartitionPartitionerSpec.groovy
deleted file mode 100644
index f3cfd4c47476..000000000000
--- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/groovy/org/apache/nifi/controller/queue/clustered/partition/NonLocalPartitionPartitionerSpec.groovy
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.controller.queue.clustered.partition
-
-
-import org.apache.nifi.controller.repository.FlowFileRecord
-import spock.lang.Specification
-import spock.lang.Unroll
-
-import java.util.concurrent.ConcurrentLinkedQueue
-import java.util.concurrent.Executors
-import java.util.concurrent.TimeUnit
-
-class NonLocalPartitionPartitionerSpec extends Specification {
-
- def "getPartition chooses local partition with 1 partition and throws IllegalStateException"() {
- given: "a local partitioner using a local partition"
- def partitioner = new NonLocalPartitionPartitioner()
- def localPartition = Mock QueuePartition
- def partitions = [localPartition] as QueuePartition[]
- def flowFileRecord = Mock FlowFileRecord
-
- when: "a partition is requested from the partitioner"
- partitioner.getPartition flowFileRecord, partitions, localPartition
-
- then: "an IllegalStateExceptions thrown"
- thrown(IllegalStateException)
- }
-
- @Unroll
- def "getPartition chooses non-local partition with #maxPartitions partitions, #threads threads, #iterations iterations"() {
- given: "a local partitioner"
- def partitioner = new NonLocalPartitionPartitioner()
- def partitions = new QueuePartition[maxPartitions]
-
- and: "a local partition"
- def localPartition = Mock QueuePartition
- partitions[0] = localPartition
-
- and: "one or more multiple partitions"
- for (int id = 1; id < maxPartitions; ++id) {
- def partition = Mock QueuePartition
- partitions[id] = partition
- }
-
- and: "an array to hold the resulting chosen partitions and an executor service with one or more threads"
- def flowFileRecord = Mock FlowFileRecord
- def chosenPartitions = [] as ConcurrentLinkedQueue
- def executorService = Executors.newFixedThreadPool threads
-
- when: "a partition is requested from the partitioner for a given flowfile record and the existing partitions"
- iterations.times {
- executorService.submit {
- chosenPartitions.add partitioner.getPartition(flowFileRecord, partitions, localPartition)
- }
- }
- executorService.shutdown()
- try {
- while (!executorService.awaitTermination(10, TimeUnit.MILLISECONDS)) {
- Thread.sleep(10)
- }
- } catch (InterruptedException e) {
- executorService.shutdownNow()
- Thread.currentThread().interrupt()
- }
-
- then: "no exceptions are thrown"
- noExceptionThrown()
-
- and: "there is a chosen partition for each iteration"
- chosenPartitions.size() == iterations
-
- and: "each chosen partition is a remote partition and is one of the existing partitions"
- def validChosenPartitions = chosenPartitions.findAll { it != localPartition && partitions.contains(it) }
-
- and: "there is a valid chosen partition for each iteration"
- validChosenPartitions.size() == iterations
-
- and: "there are no other mock interactions"
- 0 * _
-
- where:
- maxPartitions | threads | iterations
- 2 | 1 | 1
- 2 | 1 | 10
- 2 | 1 | 100
- 2 | 10 | 1000
- 5 | 1 | 1
- 5 | 1 | 10
- 5 | 1 | 100
- 5 | 10 | 1000
- }
-}
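Note for reviewers: this spec is removed without a one-for-one replacement in this diff. Should the coverage be ported, a minimal JUnit 5 + Mockito sketch of the same partitioner contract could look like the following (illustrative only; the class name is hypothetical, but the getPartition(flowFile, partitions, localPartition) signature is the one exercised above):

    import static org.junit.jupiter.api.Assertions.assertSame;
    import static org.junit.jupiter.api.Assertions.assertThrows;
    import static org.mockito.Mockito.mock;

    import org.apache.nifi.controller.queue.clustered.partition.NonLocalPartitionPartitioner;
    import org.apache.nifi.controller.queue.clustered.partition.QueuePartition;
    import org.apache.nifi.controller.repository.FlowFileRecord;
    import org.junit.jupiter.api.Test;

    class NonLocalPartitionPartitionerTest {

        @Test
        void getPartitionThrowsWhenOnlyLocalPartitionExists() {
            final NonLocalPartitionPartitioner partitioner = new NonLocalPartitionPartitioner();
            final QueuePartition localPartition = mock(QueuePartition.class);
            final QueuePartition[] partitions = { localPartition };

            // With no remote partition available, the partitioner has nothing valid to return
            assertThrows(IllegalStateException.class,
                    () -> partitioner.getPartition(mock(FlowFileRecord.class), partitions, localPartition));
        }

        @Test
        void getPartitionNeverChoosesLocalPartition() {
            final NonLocalPartitionPartitioner partitioner = new NonLocalPartitionPartitioner();
            final QueuePartition localPartition = mock(QueuePartition.class);
            final QueuePartition remotePartition = mock(QueuePartition.class);
            final QueuePartition[] partitions = { localPartition, remotePartition };
            final FlowFileRecord flowFile = mock(FlowFileRecord.class);

            // With a single remote partition, every call must return that partition
            for (int i = 0; i < 100; i++) {
                assertSame(remotePartition, partitioner.getPartition(flowFile, partitions, localPartition));
            }
        }
    }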
diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-runtime/pom.xml b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-runtime/pom.xml
index 89ffe9e372c5..b41bfbbfb8aa 100644
--- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-runtime/pom.xml
+++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-runtime/pom.xml
@@ -73,39 +73,4 @@
             <scope>test</scope>
         </dependency>
     </dependencies>
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.codehaus.mojo</groupId>
-                <artifactId>build-helper-maven-plugin</artifactId>
-                <version>1.5</version>
-                <executions>
-                    <execution>
-                        <id>add-source</id>
-                        <phase>generate-sources</phase>
-                        <goals>
-                            <goal>add-source</goal>
-                        </goals>
-                        <configuration>
-                            <sources>
-                                <source>src/main/groovy</source>
-                            </sources>
-                        </configuration>
-                    </execution>
-                    <execution>
-                        <id>add-test-source</id>
-                        <phase>generate-test-sources</phase>
-                        <goals>
-                            <goal>add-test-source</goal>
-                        </goals>
-                        <configuration>
-                            <sources>
-                                <source>src/test/groovy</source>
-                            </sources>
-                        </configuration>
-                    </execution>
-                </executions>
-            </plugin>
-        </plugins>
-    </build>
 </project>
diff --git a/nifi-nar-bundles/nifi-graph-bundle/nifi-graph-test-clients/src/test/groovy/org/apache/nifi/graph/InMemoryJanusGraphClientServiceTest.groovy b/nifi-nar-bundles/nifi-graph-bundle/nifi-graph-test-clients/src/test/groovy/org/apache/nifi/graph/InMemoryJanusGraphClientServiceTest.groovy
deleted file mode 100644
index fdf37e7b33e3..000000000000
--- a/nifi-nar-bundles/nifi-graph-bundle/nifi-graph-test-clients/src/test/groovy/org/apache/nifi/graph/InMemoryJanusGraphClientServiceTest.groovy
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.nifi.graph
-
-import org.apache.nifi.processor.AbstractProcessor
-import org.apache.nifi.processor.ProcessContext
-import org.apache.nifi.processor.ProcessSession
-import org.apache.nifi.processor.exception.ProcessException
-import org.apache.nifi.util.TestRunners
-import org.junit.jupiter.api.Test
-
-class InMemoryJanusGraphClientServiceTest {
- @Test
- void test() {
- def runner = TestRunners.newTestRunner(new AbstractProcessor() {
- @Override
- void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
-
- }
- })
- def service = new InMemoryJanusGraphClientService()
- runner.addControllerService("service", service)
- runner.enableControllerService(service)
-
- def create = """
- 1.upto(10) {
- g.addV("test").property("uuid", UUID.randomUUID().toString()).next()
- }
- """
-
- service.executeQuery(create, [:], { record, more ->
- assert !more
-
- } as GraphQueryResultCallback)
-
- def query = """
- g.V().hasLabel("test").count().next()
- """
-
- def executed = false
- service.executeQuery(query, [:], { record, more ->
- assert record["result"] == 10
- executed = true
- } as GraphQueryResultCallback)
-
- assert executed
- }
-}
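The graph-client test above already used JUnit 5, so a Java port is largely mechanical. A minimal sketch, assuming the same InMemoryJanusGraphClientService and GraphQueryResultCallback APIs exercised above (the callback is a single-method interface, so a lambda can stand in):

    import java.util.Collections;
    import java.util.concurrent.atomic.AtomicBoolean;

    import org.apache.nifi.processor.AbstractProcessor;
    import org.apache.nifi.processor.ProcessContext;
    import org.apache.nifi.processor.ProcessSession;
    import org.apache.nifi.util.TestRunner;
    import org.apache.nifi.util.TestRunners;
    import org.junit.jupiter.api.Test;

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    class InMemoryJanusGraphClientServiceJavaTest {

        @Test
        void testAddAndCountVertices() throws Exception {
            final TestRunner runner = TestRunners.newTestRunner(new AbstractProcessor() {
                @Override
                public void onTrigger(final ProcessContext context, final ProcessSession session) {
                }
            });
            final InMemoryJanusGraphClientService service = new InMemoryJanusGraphClientService();
            runner.addControllerService("service", service);
            runner.enableControllerService(service);

            // Create ten vertices, mirroring the Gremlin script in the deleted test
            service.executeQuery(
                    "1.upto(10) { g.addV('test').property('uuid', UUID.randomUUID().toString()).next() }",
                    Collections.emptyMap(), (record, hasMore) -> { });

            // Count them back and assert the callback actually fired
            final AtomicBoolean executed = new AtomicBoolean(false);
            service.executeQuery("g.V().hasLabel('test').count().next()",
                    Collections.emptyMap(), (record, hasMore) -> {
                        assertEquals(10L, ((Number) record.get("result")).longValue());
                        executed.set(true);
                    });
            assertTrue(executed.get());
        }
    }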
diff --git a/nifi-nar-bundles/nifi-provenance-repository-bundle/nifi-persistent-provenance-repository/pom.xml b/nifi-nar-bundles/nifi-provenance-repository-bundle/nifi-persistent-provenance-repository/pom.xml
index 2f1b172a75e6..c3e7fe33c123 100644
--- a/nifi-nar-bundles/nifi-provenance-repository-bundle/nifi-persistent-provenance-repository/pom.xml
+++ b/nifi-nar-bundles/nifi-provenance-repository-bundle/nifi-persistent-provenance-repository/pom.xml
@@ -105,11 +105,6 @@
             <artifactId>bcprov-jdk18on</artifactId>
             <scope>test</scope>
-        <dependency>
-            <groupId>org.codehaus.groovy</groupId>
-            <artifactId>groovy-test</artifactId>
-            <scope>test</scope>
-        </dependency>
diff --git a/nifi-nar-bundles/nifi-provenance-repository-bundle/nifi-persistent-provenance-repository/src/test/groovy/org/apache/nifi/provenance/EncryptedSchemaRecordReaderWriterTest.groovy b/nifi-nar-bundles/nifi-provenance-repository-bundle/nifi-persistent-provenance-repository/src/test/groovy/org/apache/nifi/provenance/EncryptedSchemaRecordReaderWriterTest.groovy
deleted file mode 100644
index acac14246abf..000000000000
--- a/nifi-nar-bundles/nifi-provenance-repository-bundle/nifi-persistent-provenance-repository/src/test/groovy/org/apache/nifi/provenance/EncryptedSchemaRecordReaderWriterTest.groovy
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.provenance
-
-import org.apache.nifi.flowfile.FlowFile
-import org.apache.nifi.provenance.serialization.RecordReader
-import org.apache.nifi.provenance.serialization.RecordWriter
-import org.apache.nifi.provenance.toc.StandardTocReader
-import org.apache.nifi.provenance.toc.StandardTocWriter
-import org.apache.nifi.provenance.toc.TocReader
-import org.apache.nifi.provenance.toc.TocUtil
-import org.apache.nifi.provenance.toc.TocWriter
-import org.apache.nifi.repository.encryption.AesGcmByteArrayRepositoryEncryptor
-import org.apache.nifi.repository.encryption.RepositoryEncryptor
-import org.apache.nifi.repository.encryption.configuration.EncryptionMetadataHeader
-import org.apache.nifi.security.kms.KeyProvider
-import org.bouncycastle.jce.provider.BouncyCastleProvider
-import org.bouncycastle.util.encoders.Hex
-import org.junit.jupiter.api.BeforeAll
-import org.junit.jupiter.api.BeforeEach
-import org.junit.jupiter.api.Test
-import org.slf4j.Logger
-import org.slf4j.LoggerFactory
-
-import javax.crypto.spec.SecretKeySpec
-import java.security.KeyManagementException
-import java.security.Security
-import java.util.concurrent.atomic.AtomicLong
-
-import static org.apache.nifi.provenance.TestUtil.createFlowFile
-import static org.junit.jupiter.api.Assertions.assertEquals
-import static org.junit.jupiter.api.Assertions.assertNotNull
-import static org.junit.jupiter.api.Assertions.assertNull
-import static org.junit.jupiter.api.Assertions.assertThrows
-
-class EncryptedSchemaRecordReaderWriterTest extends AbstractTestRecordReaderWriter {
- private static final Logger logger = LoggerFactory.getLogger(EncryptedSchemaRecordReaderWriterTest.class)
-
- private static final String KEY_HEX_128 = "0123456789ABCDEFFEDCBA9876543210"
- private static final String KEY_HEX = KEY_HEX_128
- private static final String KEY_ID = "K1"
-
- private static final String TRANSIT_URI = "nifi://unit-test"
- private static final String PROCESSOR_TYPE = "Mock Processor"
- private static final String COMPONENT_ID = "1234"
-
- private static final int UNCOMPRESSED_BLOCK_SIZE = 1024 * 32
- private static final int MAX_ATTRIBUTE_SIZE = 2048
-
- private static final AtomicLong idGenerator = new AtomicLong(0L)
- private File journalFile
- private File tocFile
-
- private static KeyProvider mockKeyProvider
- private static RepositoryEncryptor repositoryEncryptor
-
- @BeforeAll
- static void setUpOnce() throws Exception {
- Security.addProvider(new BouncyCastleProvider())
-
- mockKeyProvider = [
- getKey : { String keyId ->
- if (keyId == KEY_ID) {
- new SecretKeySpec(Hex.decode(KEY_HEX), "AES")
- } else {
- throw new KeyManagementException("${keyId} is not available")
- }
- },
- getAvailableKeyIds: { ->
- [KEY_ID]
- },
- keyExists : { String keyId ->
- keyId == KEY_ID
- }] as KeyProvider
- repositoryEncryptor = new AesGcmByteArrayRepositoryEncryptor(mockKeyProvider, EncryptionMetadataHeader.PROVENANCE)
- }
-
- @BeforeEach
- void setUp() throws Exception {
- journalFile = File.createTempFile(getClass().simpleName, ".journal")
- journalFile.deleteOnExit()
- tocFile = TocUtil.getTocFile(journalFile)
- idGenerator.set(0L)
- }
-
- private static final FlowFile buildFlowFile(Map attributes = [:],
- long id = idGenerator.getAndIncrement(),
- long fileSize = 3000L) {
- if (!attributes?.uuid) {
- attributes.uuid = UUID.randomUUID().toString()
- }
- createFlowFile(id, fileSize, attributes)
- }
-
- private static ProvenanceEventRecord buildEventRecord(FlowFile flowfile = buildFlowFile(),
- ProvenanceEventType eventType = ProvenanceEventType.RECEIVE,
- String transitUri = TRANSIT_URI, String componentId = COMPONENT_ID,
- String componentType = PROCESSOR_TYPE,
- long eventTime = System.currentTimeMillis()) {
- final ProvenanceEventBuilder builder = new StandardProvenanceEventRecord.Builder()
- builder.setEventTime(eventTime)
- builder.setEventType(eventType)
- builder.setTransitUri(transitUri)
- builder.fromFlowFile(flowfile)
- builder.setComponentId(componentId)
- builder.setComponentType(componentType)
- builder.build()
- }
-
- @Override
- protected RecordWriter createWriter(
- final File file,
- final TocWriter tocWriter, final boolean compressed, final int uncompressedBlockSize) throws IOException {
- createWriter(file, tocWriter, compressed, uncompressedBlockSize, repositoryEncryptor)
- }
-
- protected static RecordWriter createWriter(
- final File file,
- final TocWriter tocWriter,
- final boolean compressed,
- final int uncompressedBlockSize, RepositoryEncryptor encryptor) throws IOException {
- return new EncryptedSchemaRecordWriter(file, idGenerator, tocWriter, compressed, uncompressedBlockSize, IdentifierLookup.EMPTY, encryptor, KEY_ID)
- }
-
- @Override
- protected RecordReader createReader(
- final InputStream inputStream,
- final String journalFilename, final TocReader tocReader, final int maxAttributeSize) throws IOException {
- return new EncryptedSchemaRecordReader(inputStream, journalFilename, tocReader, maxAttributeSize, repositoryEncryptor)
- }
-
- /**
- * Build a record and write it to the repository with the encrypted writer. Recover with the encrypted reader and verify.
- */
- @Test
- void testShouldWriteAndReadEncryptedRecord() {
- // Arrange
- final ProvenanceEventRecord record = buildEventRecord()
- logger.info("Built sample PER: ${record}")
-
- TocWriter tocWriter = new StandardTocWriter(tocFile, false, false)
-
- RecordWriter encryptedWriter = createWriter(journalFile, tocWriter, false, UNCOMPRESSED_BLOCK_SIZE)
- logger.info("Generated encrypted writer: ${encryptedWriter}")
-
- // Act
- long encryptedRecordId = idGenerator.get()
- encryptedWriter.writeHeader(encryptedRecordId)
- encryptedWriter.writeRecords(Collections.singletonList(record))
- encryptedWriter.close()
- logger.info("Wrote encrypted record ${encryptedRecordId} to journal")
-
- // Assert
- TocReader tocReader = new StandardTocReader(tocFile)
- final FileInputStream fis = new FileInputStream(journalFile)
- final RecordReader reader = createReader(fis, journalFile.getName(), tocReader, MAX_ATTRIBUTE_SIZE)
- logger.info("Generated encrypted reader: ${reader}")
-
- ProvenanceEventRecord encryptedEvent = reader.nextRecord()
- assertNotNull(encryptedEvent)
- assertEquals(encryptedRecordId, encryptedEvent.getEventId())
- assertEquals(record.componentId, encryptedEvent.getComponentId())
- assertEquals(record.componentType, encryptedEvent.getComponentType())
- logger.info("Successfully read encrypted record: ${encryptedEvent}")
-
- assertNull(reader.nextRecord())
- }
-
- /**
- * Build a record and write it with a standard writer and the encrypted writer to different repositories.
- * Recover with the standard reader and verify that the encrypted record's contents cannot be read.
- */
- @Test
- void testShouldWriteEncryptedRecordAndPlainRecord() {
- // Arrange
- final ProvenanceEventRecord record = buildEventRecord()
- logger.info("Built sample PER: ${record}")
-
- TocWriter tocWriter = new StandardTocWriter(tocFile, false, false)
-
- RecordWriter standardWriter = new EventIdFirstSchemaRecordWriter(journalFile, idGenerator, tocWriter, false, UNCOMPRESSED_BLOCK_SIZE, IdentifierLookup.EMPTY)
- logger.info("Generated standard writer: ${standardWriter}")
-
- File encryptedJournalFile = new File(journalFile.absolutePath + "_encrypted")
- File encryptedTocFile = TocUtil.getTocFile(encryptedJournalFile)
- TocWriter encryptedTocWriter = new StandardTocWriter(encryptedTocFile, false, false)
- RecordWriter encryptedWriter = createWriter(encryptedJournalFile, encryptedTocWriter, false, UNCOMPRESSED_BLOCK_SIZE)
- logger.info("Generated encrypted writer: ${encryptedWriter}")
-
- // Act
- long standardRecordId = idGenerator.get()
- standardWriter.writeHeader(standardRecordId)
- standardWriter.writeRecords(Collections.singletonList(record))
- standardWriter.close()
- logger.info("Wrote standard record ${standardRecordId} to journal")
-
- long encryptedRecordId = idGenerator.get()
- encryptedWriter.writeHeader(encryptedRecordId)
- encryptedWriter.writeRecords(Collections.singletonList(record))
- encryptedWriter.close()
- logger.info("Wrote encrypted record ${encryptedRecordId} to journal")
-
- // Assert
- TocReader tocReader = new StandardTocReader(tocFile)
- final FileInputStream fis = new FileInputStream(journalFile)
- final RecordReader reader = new EventIdFirstSchemaRecordReader(fis, journalFile.getName(), tocReader, MAX_ATTRIBUTE_SIZE)
- logger.info("Generated standard reader: ${reader}")
-
- ProvenanceEventRecord standardEvent = reader.nextRecord()
- assertNotNull(standardEvent)
- assertEquals(standardRecordId, standardEvent.getEventId())
- assertEquals(record.componentId, standardEvent.getComponentId())
- assertEquals(record.componentType, standardEvent.getComponentType())
- logger.info("Successfully read standard record: ${standardEvent}")
-
- assertNull(reader.nextRecord())
-
- // Demonstrate unable to read from encrypted file with standard reader
- TocReader incompatibleTocReader = new StandardTocReader(encryptedTocFile)
- final FileInputStream efis = new FileInputStream(encryptedJournalFile)
- RecordReader incompatibleReader = new EventIdFirstSchemaRecordReader(efis, encryptedJournalFile.getName(), incompatibleTocReader, MAX_ATTRIBUTE_SIZE)
- logger.info("Generated standard reader (attempting to read encrypted file): ${incompatibleReader}")
-
- assertThrows(EOFException.class, () -> incompatibleReader.nextRecord())
- }
-}
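The encrypted round-trip above is the most involved of the deleted specs. The core of a Java port is the same write-then-read sequence; a compact sketch, assuming the deleted test's helpers (buildEventRecord, createWriter, createReader), fields (journalFile, tocFile, idGenerator), and encryptor wiring are ported as-is:

    // Write one event with the encrypted writer, then read it back and verify the IDs match
    final ProvenanceEventRecord record = buildEventRecord();
    final TocWriter tocWriter = new StandardTocWriter(tocFile, false, false);
    final RecordWriter writer = createWriter(journalFile, tocWriter, false, UNCOMPRESSED_BLOCK_SIZE);

    final long recordId = idGenerator.get();
    writer.writeHeader(recordId);
    writer.writeRecords(Collections.singletonList(record));
    writer.close();

    try (RecordReader reader = createReader(new FileInputStream(journalFile),
            journalFile.getName(), new StandardTocReader(tocFile), MAX_ATTRIBUTE_SIZE)) {
        final ProvenanceEventRecord read = reader.nextRecord();
        assertEquals(recordId, read.getEventId());
        assertNull(reader.nextRecord());
    }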
diff --git a/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/src/test/groovy/org/apache/nifi/lookup/script/TestScriptedLookupService.groovy b/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/src/test/groovy/org/apache/nifi/lookup/script/TestScriptedLookupService.groovy
deleted file mode 100644
index 9324bec584cf..000000000000
--- a/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/src/test/groovy/org/apache/nifi/lookup/script/TestScriptedLookupService.groovy
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.lookup.script
-
-import org.apache.nifi.processor.AbstractProcessor
-import org.apache.nifi.processor.ProcessContext
-import org.apache.nifi.processor.ProcessSession
-import org.apache.nifi.processor.exception.ProcessException
-import org.apache.nifi.processors.script.AccessibleScriptingComponentHelper
-import org.apache.nifi.script.ScriptingComponentHelper
-import org.apache.nifi.script.ScriptingComponentUtils
-import org.apache.nifi.util.MockFlowFile
-import org.apache.nifi.util.TestRunner
-import org.apache.nifi.util.TestRunners
-import org.junit.jupiter.api.BeforeAll
-import org.junit.jupiter.api.BeforeEach
-import org.junit.jupiter.api.Test
-import org.slf4j.Logger
-import org.slf4j.LoggerFactory
-
-import java.nio.file.Files
-import java.nio.file.Path
-import java.nio.file.Paths
-import java.nio.file.StandardCopyOption
-
-import static org.junit.jupiter.api.Assertions.assertEquals
-import static org.junit.jupiter.api.Assertions.assertFalse
-import static org.junit.jupiter.api.Assertions.assertTrue
-
-/**
- * Unit tests for the ScriptedLookupService controller service
- */
-class TestScriptedLookupService {
- private static final String GROOVY_SCRIPT = "test_lookup_inline.groovy"
- private static final String ALTERNATE_GROOVY_SCRIPT = "test_simple_lookup_inline.groovy"
- private static final Path SOURCE_PATH = Paths.get("src/test/resources/groovy", GROOVY_SCRIPT)
- private static final Path ALTERNATE_SOURCE_PATH = Paths.get("src/test/resources/groovy", ALTERNATE_GROOVY_SCRIPT)
- private static final Path TARGET_PATH = Paths.get("target", GROOVY_SCRIPT)
- private static final Path ALTERNATE_TARGET_PATH = Paths.get("target", ALTERNATE_GROOVY_SCRIPT)
- private static final Logger logger = LoggerFactory.getLogger(TestScriptedLookupService)
- ScriptedLookupService scriptedLookupService
- def scriptingComponent
-
-
- @BeforeAll
- static void setUpOnce() throws Exception {
- logger.metaClass.methodMissing = {String name, args ->
- logger.info("[${name?.toUpperCase()}] ${(args as List).join(" ")}")
- }
- Files.copy(SOURCE_PATH, TARGET_PATH, StandardCopyOption.REPLACE_EXISTING)
- Files.copy(ALTERNATE_SOURCE_PATH, ALTERNATE_TARGET_PATH, StandardCopyOption.REPLACE_EXISTING)
- TARGET_PATH.toFile().deleteOnExit()
- ALTERNATE_TARGET_PATH.toFile().deleteOnExit()
- }
-
- @BeforeEach
- void setUp() {
- scriptedLookupService = new MockScriptedLookupService()
- scriptingComponent = (AccessibleScriptingComponentHelper) scriptedLookupService
- }
-
- @Test
- void testLookupServiceGroovyScript() {
- final TestRunner runner = TestRunners.newTestRunner(new AbstractProcessor() {
- @Override
- public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
- }
- });
-
- runner.addControllerService("lookupService", scriptedLookupService);
- runner.setProperty(scriptedLookupService, "Script Engine", "Groovy");
- runner.setProperty(scriptedLookupService, ScriptingComponentUtils.SCRIPT_FILE, TARGET_PATH.toString());
- runner.setProperty(scriptedLookupService, ScriptingComponentUtils.SCRIPT_BODY, (String) null);
- runner.setProperty(scriptedLookupService, ScriptingComponentUtils.MODULES, (String) null);
- runner.enableControllerService(scriptedLookupService);
-
- MockFlowFile mockFlowFile = new MockFlowFile(1L)
- InputStream inStream = new ByteArrayInputStream('Flow file content not used'.bytes)
-
- Optional opt = scriptedLookupService.lookup(['key':'Hello'])
- assertTrue(opt.present)
- assertEquals('Hi', opt.get())
- opt = scriptedLookupService.lookup(['key':'World'])
- assertTrue(opt.present)
- assertEquals('there', opt.get())
- opt = scriptedLookupService.lookup(['key':'Not There'])
- assertFalse(opt.present)
- }
-
- @Test
- void testLookupServiceScriptReload() {
- final TestRunner runner = TestRunners.newTestRunner(new AbstractProcessor() {
- @Override
- public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
- }
- });
-
- runner.addControllerService("lookupService", scriptedLookupService)
- runner.setProperty(scriptedLookupService, "Script Engine", "Groovy")
- runner.setProperty(scriptedLookupService, ScriptingComponentUtils.SCRIPT_BODY, (String) null)
- runner.setProperty(scriptedLookupService, ScriptingComponentUtils.SCRIPT_FILE, TARGET_PATH.toString())
- runner.setProperty(scriptedLookupService, ScriptingComponentUtils.MODULES, (String) null)
- runner.enableControllerService(scriptedLookupService)
-
- Optional opt = scriptedLookupService.lookup(['key':'Hello'])
- assertTrue(opt.present)
- assertEquals('Hi', opt.get())
- opt = scriptedLookupService.lookup(['key':'World'])
- assertTrue(opt.present)
- assertEquals('there', opt.get())
- opt = scriptedLookupService.lookup(['key':'Not There'])
- assertFalse(opt.present)
-
- // Disable and load different script
- runner.disableControllerService(scriptedLookupService)
- runner.setProperty(scriptedLookupService, ScriptingComponentUtils.SCRIPT_FILE, ALTERNATE_TARGET_PATH.toString())
- runner.enableControllerService(scriptedLookupService)
-
- opt = scriptedLookupService.lookup(['key':'Hello'])
- assertTrue(opt.present)
- assertEquals('Goodbye', opt.get())
- opt = scriptedLookupService.lookup(['key':'World'])
- assertTrue(opt.present)
- assertEquals('Stranger', opt.get())
- opt = scriptedLookupService.lookup(['key':'Not There'])
- assertFalse(opt.present)
- }
-
- class MockScriptedLookupService extends ScriptedLookupService implements AccessibleScriptingComponentHelper {
-
- @Override
- ScriptingComponentHelper getScriptingComponentHelper() {
- return this.@scriptingComponentHelper
- }
- }
-}
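A Java port of the lookup assertions is equally compact. A minimal sketch, assuming the same MockScriptedLookupService, runner, and target scripts copied in setUpOnce() above (only the happy path and the miss case are shown):

    import java.util.Map;
    import java.util.Optional;

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertFalse;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    @Test
    void testLookupServiceGroovyScript() throws Exception {
        // Runner and controller service configured exactly as in the deleted test
        runner.addControllerService("lookupService", scriptedLookupService);
        runner.setProperty(scriptedLookupService, "Script Engine", "Groovy");
        runner.setProperty(scriptedLookupService, ScriptingComponentUtils.SCRIPT_FILE, TARGET_PATH.toString());
        runner.enableControllerService(scriptedLookupService);

        final Map<String, Object> hello = Map.of("key", "Hello");
        final Optional<Object> found = scriptedLookupService.lookup(hello);
        assertTrue(found.isPresent());
        assertEquals("Hi", found.get());

        final Optional<Object> missing = scriptedLookupService.lookup(Map.of("key", "Not There"));
        assertFalse(missing.isPresent());
    }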
diff --git a/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/src/test/groovy/org/apache/nifi/lookup/script/TestSimpleScriptedLookupService.groovy b/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/src/test/groovy/org/apache/nifi/lookup/script/TestSimpleScriptedLookupService.groovy
deleted file mode 100644
index 0c7f44a296ed..000000000000
--- a/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/src/test/groovy/org/apache/nifi/lookup/script/TestSimpleScriptedLookupService.groovy
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.lookup.script
-
-import org.apache.nifi.processor.AbstractProcessor
-import org.apache.nifi.processor.ProcessContext
-import org.apache.nifi.processor.ProcessSession
-import org.apache.nifi.processor.exception.ProcessException
-import org.apache.nifi.processors.script.AccessibleScriptingComponentHelper
-import org.apache.nifi.script.ScriptingComponentHelper
-import org.apache.nifi.script.ScriptingComponentUtils
-import org.apache.nifi.util.MockFlowFile
-import org.apache.nifi.util.TestRunner
-import org.apache.nifi.util.TestRunners
-import org.junit.jupiter.api.BeforeAll
-import org.junit.jupiter.api.BeforeEach
-import org.junit.jupiter.api.Test
-import org.slf4j.Logger
-import org.slf4j.LoggerFactory
-
-import java.nio.file.Files
-import java.nio.file.Path
-import java.nio.file.Paths
-import java.nio.file.StandardCopyOption
-
-import static org.junit.jupiter.api.Assertions.assertEquals
-import static org.junit.jupiter.api.Assertions.assertFalse
-import static org.junit.jupiter.api.Assertions.assertTrue
-
-/**
- * Unit tests for the SimpleScriptedLookupService controller service
- */
-class TestSimpleScriptedLookupService {
- private static final String GROOVY_SCRIPT = "test_lookup_inline.groovy"
- private static final Path SOURCE_PATH = Paths.get("src/test/resources/groovy", GROOVY_SCRIPT)
- private static final Path TARGET_PATH = Paths.get("target", GROOVY_SCRIPT)
- private static final Logger logger = LoggerFactory.getLogger(TestSimpleScriptedLookupService)
- SimpleScriptedLookupService scriptedLookupService
- def scriptingComponent
-
-
- @BeforeAll
- static void setUpOnce() throws Exception {
- logger.metaClass.methodMissing = {String name, args ->
- logger.info("[${name?.toUpperCase()}] ${(args as List).join(" ")}")
- }
- Files.copy(SOURCE_PATH, TARGET_PATH, StandardCopyOption.REPLACE_EXISTING)
- TARGET_PATH.toFile().deleteOnExit()
- }
-
- @BeforeEach
- void setUp() {
- scriptedLookupService = new MockScriptedLookupService()
- scriptingComponent = (AccessibleScriptingComponentHelper) scriptedLookupService
- }
-
- @Test
- void testSimpleLookupServiceGroovyScript() {
- final TestRunner runner = TestRunners.newTestRunner(new AbstractProcessor() {
- @Override
- public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
- }
- });
-
- runner.addControllerService("lookupService", scriptedLookupService);
- runner.setProperty(scriptedLookupService, "Script Engine", "Groovy");
- runner.setProperty(scriptedLookupService, ScriptingComponentUtils.SCRIPT_FILE, TARGET_PATH.toString());
- runner.setProperty(scriptedLookupService, ScriptingComponentUtils.SCRIPT_BODY, (String) null);
- runner.setProperty(scriptedLookupService, ScriptingComponentUtils.MODULES, (String) null);
- runner.enableControllerService(scriptedLookupService);
-
- def mockFlowFile = new MockFlowFile(1L)
- def inStream = new ByteArrayInputStream('Flow file content not used'.bytes)
-
- def opt = scriptedLookupService.lookup(['key':'Hello'])
- assertTrue(opt.present)
- assertEquals('Hi', opt.get())
- opt = scriptedLookupService.lookup(['key':'World'])
- assertTrue(opt.present)
- assertEquals('there', opt.get())
- opt = scriptedLookupService.lookup(['key':'Not There'])
- assertFalse(opt.present)
- }
-
- class MockScriptedLookupService extends SimpleScriptedLookupService implements AccessibleScriptingComponentHelper {
-
- @Override
- ScriptingComponentHelper getScriptingComponentHelper() {
- return this.@scriptingComponentHelper
- }
- }
-}
diff --git a/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/src/test/groovy/org/apache/nifi/processors/script/ExecuteScriptGroovyTest.groovy b/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/src/test/groovy/org/apache/nifi/processors/script/ExecuteScriptGroovyTest.groovy
deleted file mode 100644
index e56b0b07b556..000000000000
--- a/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/src/test/groovy/org/apache/nifi/processors/script/ExecuteScriptGroovyTest.groovy
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.processors.script
-
-import org.apache.nifi.script.ScriptingComponentUtils
-import org.apache.nifi.util.MockFlowFile
-import org.apache.nifi.util.TestRunners
-import org.junit.jupiter.api.BeforeAll
-import org.junit.jupiter.api.BeforeEach
-import org.junit.jupiter.api.Test
-import org.slf4j.Logger
-import org.slf4j.LoggerFactory
-
-import static org.junit.jupiter.api.Assertions.assertNotNull
-import static org.junit.jupiter.api.Assertions.assertTrue
-
-class ExecuteScriptGroovyTest extends BaseScriptTest {
- private static final Logger logger = LoggerFactory.getLogger(ExecuteScriptGroovyTest.class)
-
- @BeforeAll
- static void setUpOnce() throws Exception {
- logger.metaClass.methodMissing = { String name, args ->
- logger.info("[${name?.toUpperCase()}] ${(args as List).join(" ")}")
- }
- }
-
- @BeforeEach
- void setUp() throws Exception {
- super.setupExecuteScript()
-
- runner.setValidateExpressionUsage(false)
- runner.setProperty(scriptingComponent.getScriptingComponentHelper().SCRIPT_ENGINE, "Groovy")
- runner.setProperty(ScriptingComponentUtils.SCRIPT_FILE, TEST_RESOURCE_LOCATION + "groovy/testAddTimeAndThreadAttribute.groovy")
- runner.setProperty(ScriptingComponentUtils.MODULES, TEST_RESOURCE_LOCATION + "groovy")
- }
-
- private void setupPooledExecuteScript(int poolSize = 2) {
- final ExecuteScript executeScript = new ExecuteScript()
- // Need to do something to initialize the properties, like retrieve the list of properties
- assertNotNull(executeScript.getSupportedPropertyDescriptors())
- runner = TestRunners.newTestRunner(executeScript)
- runner.setValidateExpressionUsage(false)
- runner.setProperty(scriptingComponent.getScriptingComponentHelper().SCRIPT_ENGINE, "Groovy")
- runner.setProperty(ScriptingComponentUtils.SCRIPT_FILE, TEST_RESOURCE_LOCATION + "groovy/testAddTimeAndThreadAttribute.groovy")
- runner.setProperty(ScriptingComponentUtils.MODULES, TEST_RESOURCE_LOCATION + "groovy")
-
- // Override userContext value
- runner.processContext.maxConcurrentTasks = poolSize
- logger.info("Overrode userContext max concurrent tasks to ${runner.processContext.maxConcurrentTasks}")
- }
-
- @Test
- void testShouldExecuteScript() throws Exception {
- // Arrange
- final String SINGLE_POOL_THREAD_PATTERN = /pool-\d+-thread-1/
-
- logger.info("Mock flowfile queue contents: ${runner.queueSize} ${runner.flowFileQueue.queue}")
- runner.assertValid()
-
- // Act
- runner.run()
-
- // Assert
- runner.assertAllFlowFilesTransferred(ExecuteScript.REL_SUCCESS, 1)
- final List result = runner.getFlowFilesForRelationship(ExecuteScript.REL_SUCCESS)
- MockFlowFile flowFile = result.get(0)
- logger.info("Resulting flowfile attributes: ${flowFile.attributes}")
-
- flowFile.assertAttributeExists("time-updated")
- flowFile.assertAttributeExists("thread")
- assertTrue((flowFile.getAttribute("thread") =~ SINGLE_POOL_THREAD_PATTERN).find())
- }
-
- @Test
- void testShouldExecuteScriptSerially() throws Exception {
- // Arrange
- final int ITERATIONS = 10
-
- logger.info("Mock flowfile queue contents: ${runner.queueSize} ${runner.flowFileQueue.queue}")
- runner.assertValid()
-
- // Act
- runner.run(ITERATIONS)
-
- // Assert
- runner.assertAllFlowFilesTransferred(ExecuteScript.REL_SUCCESS, ITERATIONS)
- final List result = runner.getFlowFilesForRelationship(ExecuteScript.REL_SUCCESS)
-
- result.eachWithIndex { MockFlowFile flowFile, int i ->
- logger.info("Resulting flowfile [${i}] attributes: ${flowFile.attributes}")
-
- flowFile.assertAttributeExists("time-updated")
- flowFile.assertAttributeExists("thread")
- assertTrue((flowFile.getAttribute("thread") =~ /pool-\d+-thread-1/).find())
- }
- }
-
- @Test
- void testShouldExecuteScriptWithPool() throws Exception {
- // Arrange
- final int ITERATIONS = 10
- final int POOL_SIZE = 2
-
- setupPooledExecuteScript(POOL_SIZE)
- logger.info("Set up ExecuteScript processor with pool size: ${POOL_SIZE}")
-
- runner.setThreadCount(POOL_SIZE)
-
- logger.info("Mock flowfile queue contents: ${runner.queueSize} ${runner.flowFileQueue.queue}")
- runner.assertValid()
-
- // Act
- runner.run(ITERATIONS)
-
- // Assert
- runner.assertAllFlowFilesTransferred(ExecuteScript.REL_SUCCESS, ITERATIONS)
- final List result = runner.getFlowFilesForRelationship(ExecuteScript.REL_SUCCESS)
-
- result.eachWithIndex { MockFlowFile flowFile, int i ->
- logger.info("Resulting flowfile [${i}] attributes: ${flowFile.attributes}")
-
- flowFile.assertAttributeExists("time-updated")
- flowFile.assertAttributeExists("thread")
- assertTrue((flowFile.getAttribute("thread") =~ /pool-\d+-thread-[1-${POOL_SIZE}]/).find())
- }
- }
-
- @Test
- void testExecuteScriptRecompileOnChange() throws Exception {
-
- runner.setProperty(ScriptingComponentUtils.SCRIPT_FILE, TEST_RESOURCE_LOCATION + "groovy/setAttributeHello_executescript.groovy")
- runner.enqueue('')
- runner.run()
-
- runner.assertAllFlowFilesTransferred(ExecuteScript.REL_SUCCESS, 1)
- List result = runner.getFlowFilesForRelationship(ExecuteScript.REL_SUCCESS)
- MockFlowFile flowFile = result.get(0)
- flowFile.assertAttributeExists('greeting')
- flowFile.assertAttributeEquals('greeting', 'hello')
- runner.clearTransferState()
-
- runner.setProperty(ScriptingComponentUtils.SCRIPT_FILE, TEST_RESOURCE_LOCATION + "groovy/setAttributeGoodbye_executescript.groovy")
- runner.enqueue('')
- runner.run()
-
- runner.assertAllFlowFilesTransferred(ExecuteScript.REL_SUCCESS, 1)
- result = runner.getFlowFilesForRelationship(ExecuteScript.REL_SUCCESS)
- flowFile = result.get(0)
- flowFile.assertAttributeExists('greeting')
- flowFile.assertAttributeEquals('greeting', 'good-bye')
- }
-}
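The ExecuteScript tests translate directly as well. A minimal JUnit 5 sketch of the single-threaded case, assuming the same test script under src/test/resources/groovy (the class name is illustrative):

    import org.apache.nifi.script.ScriptingComponentUtils;
    import org.apache.nifi.util.MockFlowFile;
    import org.apache.nifi.util.TestRunner;
    import org.apache.nifi.util.TestRunners;
    import org.junit.jupiter.api.Test;

    class ExecuteScriptGroovyJavaTest {

        @Test
        void testShouldExecuteScript() {
            final TestRunner runner = TestRunners.newTestRunner(new ExecuteScript());
            runner.setProperty("Script Engine", "Groovy");
            runner.setProperty(ScriptingComponentUtils.SCRIPT_FILE,
                    "src/test/resources/groovy/testAddTimeAndThreadAttribute.groovy");
            runner.setProperty(ScriptingComponentUtils.MODULES, "src/test/resources/groovy");
            runner.assertValid();

            runner.enqueue(new byte[0]);
            runner.run();

            // The script stamps each flowfile with the processing time and thread name
            runner.assertAllFlowFilesTransferred(ExecuteScript.REL_SUCCESS, 1);
            final MockFlowFile flowFile = runner.getFlowFilesForRelationship(ExecuteScript.REL_SUCCESS).get(0);
            flowFile.assertAttributeExists("time-updated");
            flowFile.assertAttributeExists("thread");
        }
    }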
diff --git a/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/src/test/groovy/org/apache/nifi/record/script/ScriptedReaderTest.groovy b/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/src/test/groovy/org/apache/nifi/record/script/ScriptedReaderTest.groovy
deleted file mode 100644
index 8cb5b2213ce9..000000000000
--- a/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/src/test/groovy/org/apache/nifi/record/script/ScriptedReaderTest.groovy
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.record.script
-
-import org.apache.nifi.processor.AbstractProcessor
-import org.apache.nifi.processor.ProcessContext
-import org.apache.nifi.processor.ProcessSession
-import org.apache.nifi.processor.exception.ProcessException
-import org.apache.nifi.processors.script.AccessibleScriptingComponentHelper
-import org.apache.nifi.script.ScriptingComponentHelper
-import org.apache.nifi.script.ScriptingComponentUtils
-import org.apache.nifi.serialization.RecordReader
-import org.apache.nifi.util.MockComponentLog
-import org.apache.nifi.util.TestRunner
-import org.apache.nifi.util.TestRunners
-import org.junit.jupiter.api.BeforeEach
-import org.junit.jupiter.api.Test
-
-import java.nio.file.Files
-import java.nio.file.Path
-import java.nio.file.Paths
-import java.nio.file.StandardCopyOption
-
-import static org.junit.jupiter.api.Assertions.assertEquals
-import static org.junit.jupiter.api.Assertions.assertNotNull
-import static org.junit.jupiter.api.Assertions.assertNull
-import static org.junit.jupiter.api.Assertions.assertTrue
-
-/**
- * Unit tests for the ScriptedReader class
- */
-class ScriptedReaderTest {
- private static final String READER_INLINE_SCRIPT = "test_record_reader_inline.groovy"
- private static final String READER_XML_SCRIPT = "test_record_reader_xml.groovy"
- private static final String READER_LOAD_SCRIPT = "test_record_reader_load_module.groovy"
- private static final String TEST_JAR = "test.jar"
- private static final String SOURCE_DIR = "src/test/resources"
- private static final String GROOVY_DIR = "groovy"
- private static final String JAR_DIR = "jar"
- private static final String TARGET_DIR = "target"
-
- def recordReaderFactory
- def runner
- def scriptingComponent
-
- @BeforeEach
- void setUp() {
- recordReaderFactory = new MockScriptedReader()
- runner = TestRunners
- scriptingComponent = (AccessibleScriptingComponentHelper) recordReaderFactory
- }
-
- @Test
- void testRecordReaderGroovyScript() {
- final TestRunner runner = TestRunners.newTestRunner(new AbstractProcessor() {
- @Override
- public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
- }
- });
-
- Path targetPath = Paths.get(TARGET_DIR, READER_INLINE_SCRIPT)
- targetPath.toFile().deleteOnExit()
- Files.copy(Paths.get(SOURCE_DIR, GROOVY_DIR, READER_INLINE_SCRIPT), targetPath, StandardCopyOption.REPLACE_EXISTING)
- runner.addControllerService("reader", recordReaderFactory);
- runner.setProperty(recordReaderFactory, "Script Engine", "Groovy");
- runner.setProperty(recordReaderFactory, ScriptingComponentUtils.SCRIPT_FILE, targetPath.toString());
- runner.setProperty(recordReaderFactory, ScriptingComponentUtils.SCRIPT_BODY, (String) null);
- runner.setProperty(recordReaderFactory, ScriptingComponentUtils.MODULES, (String) null);
- runner.enableControllerService(recordReaderFactory);
-
- byte[] contentBytes = 'Flow file content not used'.bytes
- InputStream inStream = new ByteArrayInputStream(contentBytes)
-
- RecordReader recordReader = recordReaderFactory.createRecordReader(Collections.emptyMap(), inStream, contentBytes.length,
- new MockComponentLog("id", recordReaderFactory))
- assertNotNull(recordReader)
-
- 3.times {
- def record = recordReader.nextRecord()
- assertNotNull(record)
- assertEquals(record.getAsInt('id') * 100, record.getAsInt('code'))
- }
- assertNull(recordReader.nextRecord())
- }
-
- @Test
- void testXmlRecordReaderGroovyScript() {
- final TestRunner runner = TestRunners.newTestRunner(new AbstractProcessor() {
- @Override
- public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
- }
- });
-
- Path targetPath = Paths.get(TARGET_DIR, READER_XML_SCRIPT)
- targetPath.toFile().deleteOnExit()
- Files.copy(Paths.get(SOURCE_DIR, GROOVY_DIR, READER_XML_SCRIPT), targetPath, StandardCopyOption.REPLACE_EXISTING)
- runner.addControllerService("reader", recordReaderFactory);
- runner.setProperty(recordReaderFactory, "Script Engine", "Groovy");
- runner.setProperty(recordReaderFactory, ScriptingComponentUtils.SCRIPT_FILE, targetPath.toString());
- runner.setProperty(recordReaderFactory, ScriptingComponentUtils.SCRIPT_BODY, (String) null);
- runner.setProperty(recordReaderFactory, ScriptingComponentUtils.MODULES, (String) null);
-
- def schemaText = '''
- [
- {"id": "int"},
- {"name": "string"},
- {"code": "int"}
- ]
- '''
- runner.setProperty(recordReaderFactory, 'schema.text', schemaText)
-
- def logger = new MockComponentLog('ScriptedReader', '')
- runner.enableControllerService(recordReaderFactory)
-
- Map schemaVariables = ['record.tag': 'myRecord']
-
- byte[] contentBytes = '''
- <root>
-   <myRecord>
-     <id>1</id>
-     <name>John</name>
-     <code>100</code>
-   </myRecord>
-   <myRecord>
-     <id>2</id>
-     <name>Mary</name>
-     <code>200</code>
-   </myRecord>
-   <myRecord>
-     <id>3</id>
-     <name>Ramon</name>
-     <code>300</code>
-   </myRecord>
- </root>
- '''.bytes
-
- InputStream inStream = new ByteArrayInputStream(contentBytes)
-
- RecordReader recordReader = recordReaderFactory.createRecordReader(schemaVariables, inStream, contentBytes.length, logger)
- assertNotNull(recordReader)
-
- 3.times {
- def record = recordReader.nextRecord()
- assertNotNull(record)
- assertEquals(record.getAsInt('id') * 100, record.getAsInt('code'))
- }
- assertNull(recordReader.nextRecord())
- }
-
- @Test
- void testRecordReaderGroovyScriptChangeModuleDirectory() {
- final TestRunner runner = TestRunners.newTestRunner(new AbstractProcessor() {
- @Override
- public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
- }
- });
-
- Path targetPath = Paths.get(TARGET_DIR, READER_LOAD_SCRIPT)
- targetPath.toFile().deleteOnExit()
- Files.copy(Paths.get(SOURCE_DIR, GROOVY_DIR, READER_LOAD_SCRIPT), targetPath, StandardCopyOption.REPLACE_EXISTING)
- runner.addControllerService("reader", recordReaderFactory);
- runner.setProperty(recordReaderFactory, "Script Engine", "Groovy");
- runner.setProperty(recordReaderFactory, ScriptingComponentUtils.SCRIPT_FILE, targetPath.toString());
- runner.setProperty(recordReaderFactory, ScriptingComponentUtils.SCRIPT_BODY, (String) null);
- runner.setProperty(recordReaderFactory, ScriptingComponentUtils.MODULES, (String) null);
-
- boolean enableFailed;
- try {
- runner.enableControllerService(recordReaderFactory);
- enableFailed = false;
- } catch (final Throwable t) {
- enableFailed = true;
- // Expected
- }
- assertTrue(enableFailed)
-
- Path targetJar = Paths.get(TARGET_DIR, TEST_JAR)
- targetJar.toFile().deleteOnExit()
- Files.copy(Paths.get(SOURCE_DIR, JAR_DIR, TEST_JAR), targetJar, StandardCopyOption.REPLACE_EXISTING)
- runner.setProperty(recordReaderFactory, "Module Directory", targetJar.toString());
- runner.enableControllerService(recordReaderFactory)
-
- byte[] contentBytes = 'Flow file content not used'.bytes
- InputStream inStream = new ByteArrayInputStream(contentBytes)
-
- def recordReader = recordReaderFactory.createRecordReader(Collections.emptyMap(), inStream, contentBytes.length, new MockComponentLog("id", recordReaderFactory))
- assertNotNull(recordReader)
- }
-
- class MockScriptedReader extends ScriptedReader implements AccessibleScriptingComponentHelper {
-
- @Override
- ScriptingComponentHelper getScriptingComponentHelper() {
- return this.@scriptingComponentHelper
- }
- }
-}
diff --git a/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/src/test/groovy/org/apache/nifi/record/script/ScriptedRecordSetWriterTest.groovy b/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/src/test/groovy/org/apache/nifi/record/script/ScriptedRecordSetWriterTest.groovy
deleted file mode 100644
index 3ff9a3422f35..000000000000
--- a/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/src/test/groovy/org/apache/nifi/record/script/ScriptedRecordSetWriterTest.groovy
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.record.script
-
-import org.apache.nifi.processor.AbstractProcessor
-import org.apache.nifi.processor.ProcessContext
-import org.apache.nifi.processor.ProcessSession
-import org.apache.nifi.processor.exception.ProcessException
-import org.apache.nifi.processors.script.AccessibleScriptingComponentHelper
-import org.apache.nifi.script.ScriptingComponentHelper
-import org.apache.nifi.script.ScriptingComponentUtils
-import org.apache.nifi.serialization.RecordSetWriter
-import org.apache.nifi.serialization.SimpleRecordSchema
-import org.apache.nifi.serialization.record.MapRecord
-import org.apache.nifi.serialization.record.RecordField
-import org.apache.nifi.serialization.record.RecordFieldType
-import org.apache.nifi.serialization.record.RecordSet
-import org.apache.nifi.util.MockComponentLog
-import org.apache.nifi.util.TestRunner
-import org.apache.nifi.util.TestRunners
-import org.junit.jupiter.api.BeforeAll
-import org.junit.jupiter.api.BeforeEach
-import org.junit.jupiter.api.Test
-import org.slf4j.Logger
-import org.slf4j.LoggerFactory
-
-import java.nio.file.Files
-import java.nio.file.Path
-import java.nio.file.Paths
-import java.nio.file.StandardCopyOption
-
-import static org.junit.jupiter.api.Assertions.assertEquals
-import static org.junit.jupiter.api.Assertions.assertNotNull
-
-/**
- * Unit tests for the ScriptedRecordSetWriter class
- */
-class ScriptedRecordSetWriterTest {
-
- private static final Logger logger = LoggerFactory.getLogger(ScriptedRecordSetWriterTest)
- private static final String INLINE_GROOVY_PATH = "test_record_writer_inline.groovy"
- private static final String SOURCE_DIR = "src/test/resources/groovy"
- private static final Path TARGET_PATH = Paths.get("target", INLINE_GROOVY_PATH)
- MockScriptedWriter recordSetWriterFactory
- def runner
- def scriptingComponent
-
-
- @BeforeAll
- static void setUpOnce() throws Exception {
- logger.metaClass.methodMissing = {String name, args ->
- logger.info("[${name?.toUpperCase()}] ${(args as List).join(" ")}")
- }
- Files.copy(Paths.get(SOURCE_DIR, INLINE_GROOVY_PATH), TARGET_PATH, StandardCopyOption.REPLACE_EXISTING)
- TARGET_PATH.toFile().deleteOnExit()
- }
-
- @BeforeEach
- void setUp() {
- recordSetWriterFactory = new MockScriptedWriter()
- runner = TestRunners
- scriptingComponent = (AccessibleScriptingComponentHelper) recordSetWriterFactory
- }
-
- @Test
- void testRecordWriterGroovyScript() {
- final TestRunner runner = TestRunners.newTestRunner(new AbstractProcessor() {
- @Override
- public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
- }
- });
-
- runner.addControllerService("writer", recordSetWriterFactory);
- runner.setProperty(recordSetWriterFactory, "Script Engine", "Groovy");
- runner.setProperty(recordSetWriterFactory, ScriptingComponentUtils.SCRIPT_FILE, TARGET_PATH.toString());
- runner.setProperty(recordSetWriterFactory, ScriptingComponentUtils.SCRIPT_BODY, (String) null);
- runner.setProperty(recordSetWriterFactory, ScriptingComponentUtils.MODULES, (String) null);
- runner.enableControllerService(recordSetWriterFactory);
-
- def schema = recordSetWriterFactory.getSchema(Collections.emptyMap(), null)
-
- ByteArrayOutputStream outputStream = new ByteArrayOutputStream()
- RecordSetWriter recordSetWriter = recordSetWriterFactory.createWriter(new MockComponentLog('id', recordSetWriterFactory), schema, outputStream, Collections.emptyMap())
- assertNotNull(recordSetWriter)
-
- def recordSchema = new SimpleRecordSchema(
- [new RecordField('id', RecordFieldType.INT.dataType),
- new RecordField('name', RecordFieldType.STRING.dataType),
- new RecordField('code', RecordFieldType.INT.dataType)]
- )
-
- def records = [
- new MapRecord(recordSchema, ['id': 1, 'name': 'John', 'code': 100]),
- new MapRecord(recordSchema, ['id': 2, 'name': 'Mary', 'code': 200]),
- new MapRecord(recordSchema, ['id': 3, 'name': 'Ramon', 'code': 300])
- ] as MapRecord[]
-
- recordSetWriter.write(RecordSet.of(recordSchema, records))
-
- def xml = new XmlSlurper().parseText(outputStream.toString())
- assertEquals('1', xml.record[0].id.toString())
- assertEquals('200', xml.record[1].code.toString())
- assertEquals('Ramon', xml.record[2].name.toString())
- }
-
- class MockScriptedWriter extends ScriptedRecordSetWriter implements AccessibleScriptingComponentHelper {
-
- @Override
- ScriptingComponentHelper getScriptingComponentHelper() {
- return this.@scriptingComponentHelper
- }
- }
-}
diff --git a/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/src/test/groovy/org/apache/nifi/reporting/script/ScriptedReportingTaskTest.groovy b/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/src/test/groovy/org/apache/nifi/reporting/script/ScriptedReportingTaskTest.groovy
deleted file mode 100644
index 55f777c4cc74..000000000000
--- a/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/src/test/groovy/org/apache/nifi/reporting/script/ScriptedReportingTaskTest.groovy
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.reporting.script
-
-import org.apache.nifi.components.PropertyDescriptor
-import org.apache.nifi.controller.ConfigurationContext
-import org.apache.nifi.logging.ComponentLog
-import org.apache.nifi.processors.script.AccessibleScriptingComponentHelper
-import org.apache.nifi.provenance.ProvenanceEventRecord
-import org.apache.nifi.registry.VariableRegistry
-import org.apache.nifi.reporting.ReportingInitializationContext
-import org.apache.nifi.script.ScriptingComponentHelper
-import org.apache.nifi.script.ScriptingComponentUtils
-import org.apache.nifi.util.MockConfigurationContext
-import org.apache.nifi.util.MockEventAccess
-import org.apache.nifi.util.MockReportingContext
-import org.apache.nifi.util.TestRunners
-import org.junit.jupiter.api.BeforeEach
-import org.junit.jupiter.api.Test
-
-import java.nio.file.Files
-import java.nio.file.Path
-import java.nio.file.Paths
-import java.nio.file.StandardCopyOption
-
-import static org.junit.jupiter.api.Assertions.assertEquals
-import static org.junit.jupiter.api.Assertions.assertTrue
-import static org.mockito.Mockito.*
-/**
- * Unit tests for ScriptedReportingTask.
- */
-
-class ScriptedReportingTaskTest {
- private static final String PROVENANCE_EVENTS_SCRIPT = "test_log_provenance_events.groovy"
- private static final String LOG_VM_STATS = "test_log_vm_stats.groovy"
- private static final String SOURCE_DIR = "src/test/resources/groovy"
- private static final String TARGET_DIR = "target"
-
- def task
- def runner
- def scriptingComponent
-
- @BeforeEach
- void setUp() {
- task = new MockScriptedReportingTask()
- runner = TestRunners
- scriptingComponent = (AccessibleScriptingComponentHelper) task
- }
-
- @Test
- void testProvenanceGroovyScript() {
- final Map properties = new HashMap<>();
- properties.put(new PropertyDescriptor.Builder().name("Script Engine").build(), "Groovy");
-
- Path targetPath = Paths.get(TARGET_DIR, PROVENANCE_EVENTS_SCRIPT)
- targetPath.toFile().deleteOnExit()
- Files.copy(Paths.get(SOURCE_DIR, PROVENANCE_EVENTS_SCRIPT), targetPath, StandardCopyOption.REPLACE_EXISTING)
- properties.put(ScriptingComponentUtils.SCRIPT_FILE, targetPath.toString());
-
- final ConfigurationContext configurationContext = new MockConfigurationContext(properties, null)
-
- final MockReportingContext context = new MockReportingContext([:], null, VariableRegistry.EMPTY_REGISTRY)
- context.setProperty("Script Engine", "Groovy")
- context.setProperty(ScriptingComponentUtils.SCRIPT_FILE.name, targetPath.toString())
-
- final MockEventAccess eventAccess = context.getEventAccess();
- 4.times { i ->
- eventAccess.addProvenanceEvent(createProvenanceEvent(i))
- }
-
- def logger = mock(ComponentLog)
- def initContext = mock(ReportingInitializationContext)
- when(initContext.getIdentifier()).thenReturn(UUID.randomUUID().toString())
- when(initContext.getLogger()).thenReturn(logger)
-
- task.initialize initContext
- task.getSupportedPropertyDescriptors()
-
- task.setup configurationContext
- task.onTrigger context
-
- // This script should return a variable x with the number of events and a variable e with the first event
- def sr = task.scriptRunner
- def se = sr.scriptEngine
- assertEquals 3, se.x
- assertEquals '1234', se.e.componentId
- assertEquals 'xyz', se.e.attributes.abc
- task.offerScriptRunner(sr)
- }
-
- private ProvenanceEventRecord createProvenanceEvent(final long id) {
- final ProvenanceEventRecord event = mock(ProvenanceEventRecord.class)
- doReturn(id).when(event).getEventId()
- doReturn('1234').when(event).getComponentId()
- doReturn(['abc': 'xyz']).when(event).getAttributes()
- return event;
- }
-
-
- @Test
- void testVMEventsGroovyScript() {
- final Map properties = new HashMap<>();
- properties.put(new PropertyDescriptor.Builder().name("Script Engine").build(), "Groovy");
-
- Path targetPath = Paths.get(TARGET_DIR, LOG_VM_STATS)
- targetPath.toFile().deleteOnExit()
- Files.copy(Paths.get(SOURCE_DIR, LOG_VM_STATS), targetPath, StandardCopyOption.REPLACE_EXISTING)
- properties.put(ScriptingComponentUtils.SCRIPT_FILE, targetPath.toString());
-
- final ConfigurationContext configurationContext = new MockConfigurationContext(properties, null)
-
- final MockReportingContext context = new MockReportingContext([:], null, VariableRegistry.EMPTY_REGISTRY)
- context.setProperty("Script Engine", "Groovy")
- context.setProperty(ScriptingComponentUtils.SCRIPT_FILE.name, targetPath.toString());
-
- def logger = mock(ComponentLog)
- def initContext = mock(ReportingInitializationContext)
- when(initContext.getIdentifier()).thenReturn(UUID.randomUUID().toString())
- when(initContext.getLogger()).thenReturn(logger)
-
- task.initialize initContext
- task.getSupportedPropertyDescriptors()
-
- task.setup configurationContext
- task.onTrigger context
- def sr = task.scriptRunner
- def se = sr.scriptEngine
- // This script should store a variable called x with a map of stats to values
- assertTrue se.x?.uptime >= 0
- task.offerScriptRunner(sr)
-
- }
-
- @Test
- void testVMEventsJythonScript() {
- final Map properties = new HashMap<>();
- properties.put(new PropertyDescriptor.Builder().name("Script Engine").build(), "Groovy");
-
- Path targetPath = Paths.get(TARGET_DIR, LOG_VM_STATS)
- targetPath.toFile().deleteOnExit()
- Files.copy(Paths.get(SOURCE_DIR, LOG_VM_STATS), targetPath, StandardCopyOption.REPLACE_EXISTING)
- properties.put(ScriptingComponentUtils.SCRIPT_FILE, targetPath.toString());
-
- final ConfigurationContext configurationContext = new MockConfigurationContext(properties, null)
-
- final MockReportingContext context = new MockReportingContext([:], null, VariableRegistry.EMPTY_REGISTRY)
- context.setProperty("Script Engine", "Groovy")
- context.setProperty(ScriptingComponentUtils.SCRIPT_FILE.name, targetPath.toString());
-
- def logger = mock(ComponentLog)
- def initContext = mock(ReportingInitializationContext)
- when(initContext.getIdentifier()).thenReturn(UUID.randomUUID().toString())
- when(initContext.getLogger()).thenReturn(logger)
-
- task.initialize initContext
- task.getSupportedPropertyDescriptors()
-
- task.setup configurationContext
- task.onTrigger context
- def sr = task.scriptRunner
- def se = sr.scriptEngine
- // This script should store a variable called x with a map of stats to values
- assertTrue se.x?.uptime >= 0
- task.offerScriptRunner(sr)
-
- }
-
- class MockScriptedReportingTask extends ScriptedReportingTask implements AccessibleScriptingComponentHelper {
- def getScriptRunner() {
- return scriptingComponentHelper.scriptRunnerQ.poll()
- }
-
- def offerScriptRunner(runner) {
- scriptingComponentHelper.scriptRunnerQ.offer(runner)
- }
-
- @Override
- ScriptingComponentHelper getScriptingComponentHelper() {
- return this.@scriptingComponentHelper
- }
- }
-}
\ No newline at end of file
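The deleted spec above drove the reporting task through a `MockScriptedReportingTask` helper that polls the `ScriptRunner` back out of the component's internal queue and asserts on script-engine bindings such as `x` and `e`. (Note that the removed `testVMEventsJythonScript` still configured the Groovy engine, a copy-paste artifact preserved in the deleted code.) As a reference point for the migration, here is a minimal JUnit 5/Mockito sketch of the provenance-event stubbing the spec relied on; the class name is illustrative, not the actual replacement test:

```java
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;

import java.util.Map;

import org.apache.nifi.provenance.ProvenanceEventRecord;

class ProvenanceEventFixtures {

    // Mirrors the deleted Groovy helper: a mocked event with a fixed
    // component id "1234" and a single attribute "abc" -> "xyz".
    static ProvenanceEventRecord createProvenanceEvent(final long id) {
        final ProvenanceEventRecord event = mock(ProvenanceEventRecord.class);
        doReturn(id).when(event).getEventId();
        doReturn("1234").when(event).getComponentId();
        doReturn(Map.of("abc", "xyz")).when(event).getAttributes();
        return event;
    }
}
```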
diff --git a/nifi-registry/nifi-registry-core/nifi-registry-framework/pom.xml b/nifi-registry/nifi-registry-core/nifi-registry-framework/pom.xml
index 410443875335..312a0119d485 100644
--- a/nifi-registry/nifi-registry-core/nifi-registry-framework/pom.xml
+++ b/nifi-registry/nifi-registry-core/nifi-registry-framework/pom.xml
@@ -420,22 +420,6 @@
-        <dependency>
-            <groupId>org.spockframework</groupId>
-            <artifactId>spock-core</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.codehaus.groovy</groupId>
-            <artifactId>groovy-test</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>cglib</groupId>
-            <artifactId>cglib-nodep</artifactId>
-            <version>2.2.2</version>
-            <scope>test</scope>
-        </dependency>
         <dependency>
             <groupId>com.unboundid</groupId>
             <artifactId>unboundid-ldapsdk</artifactId>
diff --git a/nifi-registry/nifi-registry-core/nifi-registry-framework/src/test/groovy/org/apache/nifi/registry/security/authorization/AuthorizerFactorySpec.groovy b/nifi-registry/nifi-registry-core/nifi-registry-framework/src/test/groovy/org/apache/nifi/registry/security/authorization/AuthorizerFactorySpec.groovy
deleted file mode 100644
index 60a108443345..000000000000
--- a/nifi-registry/nifi-registry-core/nifi-registry-framework/src/test/groovy/org/apache/nifi/registry/security/authorization/AuthorizerFactorySpec.groovy
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.registry.security.authorization
-
-import org.apache.nifi.registry.extension.ExtensionClassLoader
-import org.apache.nifi.registry.extension.ExtensionManager
-import org.apache.nifi.registry.properties.NiFiRegistryProperties
-import org.apache.nifi.registry.security.authorization.resource.ResourceFactory
-import org.apache.nifi.registry.security.identity.IdentityMapper
-import org.apache.nifi.registry.service.RegistryService
-import spock.lang.Specification
-
-import javax.sql.DataSource
-
-class AuthorizerFactorySpec extends Specification {
-
- def mockProperties = Mock(NiFiRegistryProperties)
- def mockExtensionManager = Mock(ExtensionManager)
- def mockRegistryService = Mock(RegistryService)
- def mockDataSource = Mock(DataSource)
- def mockIdentityMapper = Mock(IdentityMapper)
-
- AuthorizerFactory authorizerFactory
-
- // runs before every feature method
- def setup() {
- mockExtensionManager.getExtensionClassLoader(_) >> new ExtensionClassLoader("/tmp", new URL[0],this.getClass().getClassLoader())
- mockProperties.getPropertyKeys() >> new HashSet() // Called by IdentityMappingUtil.getIdentityMappings()
-
- authorizerFactory = new AuthorizerFactory(mockProperties, mockExtensionManager, null, mockRegistryService, mockDataSource, mockIdentityMapper)
- }
-
- // runs after every feature method
- def cleanup() {
- authorizerFactory = null
- }
-
- // runs before the first feature method
- def setupSpec() {}
-
- // runs after the last feature method
- def cleanupSpec() {}
-
- def "create default authorizer"() {
-
- setup: "properties indicate nifi-registry is unsecured"
- mockProperties.getProperty(NiFiRegistryProperties.WEB_HTTPS_PORT) >> ""
-
- when: "getAuthorizer() is first called"
- def authorizer = authorizerFactory.getAuthorizer()
-
- then: "the default authorizer is returned"
- authorizer != null
-
- and: "any authorization request made to that authorizer is approved"
- def authorizationResult = authorizer.authorize(getTestAuthorizationRequest())
- authorizationResult.result == AuthorizationResult.Result.Approved
-
- }
-
- def "create file-backed authorizer"() {
-
- setup:
- setMockPropsAuthorizersConfig("src/test/resources/security/authorizers-good-file-providers.xml", "managed-authorizer")
-
- when: "getAuthorizer() is first called"
- def authorizer = authorizerFactory.getAuthorizer()
-
- then: "an authorizer is returned with the expected providers"
- authorizer != null
- authorizer instanceof ManagedAuthorizer
- def apProvider = ((ManagedAuthorizer) authorizer).getAccessPolicyProvider()
- apProvider instanceof ConfigurableAccessPolicyProvider
- def ugProvider = ((ConfigurableAccessPolicyProvider) apProvider).getUserGroupProvider()
- ugProvider instanceof ConfigurableUserGroupProvider
-
- }
-
- def "invalid authorizer configuration fails"() {
-
- when: "a bad configuration is provided and getAuthorizer() is called"
- setMockPropsAuthorizersConfig(authorizersConfigFile, selectedAuthorizer)
- authorizerFactory = new AuthorizerFactory(mockProperties, mockExtensionManager, null, mockRegistryService, mockDataSource, mockIdentityMapper)
- authorizerFactory.getAuthorizer()
-
- then: "expect an exception"
- def e = thrown AuthorizerFactoryException
- e.message =~ expectedExceptionMessage || e.getCause().getMessage() =~ expectedExceptionMessage
-
- where:
- authorizersConfigFile | selectedAuthorizer | expectedExceptionMessage
- "src/test/resources/security/authorizers-good-file-providers.xml" | "" | "When running securely, the authorizer identifier must be specified in the nifi-registry.properties file."
- "src/test/resources/security/authorizers-good-file-providers.xml" | "non-existent-authorizer" | "The specified authorizer 'non-existent-authorizer' could not be found."
- "src/test/resources/security/authorizers-bad-ug-provider-ids.xml" | "managed-authorizer" | "Duplicate User Group Provider identifier in Authorizers configuration"
- "src/test/resources/security/authorizers-bad-ap-provider-ids.xml" | "managed-authorizer" | "Duplicate Access Policy Provider identifier in Authorizers configuration"
- "src/test/resources/security/authorizers-bad-authorizer-ids.xml" | "managed-authorizer" | "Duplicate Authorizer identifier in Authorizers configuration"
- "src/test/resources/security/authorizers-bad-composite.xml" | "managed-authorizer" | "Duplicate provider in Composite User Group Provider configuration"
- "src/test/resources/security/authorizers-bad-configurable-composite.xml" | "managed-authorizer" | "Duplicate provider in Composite Configurable User Group Provider configuration"
-
- }
-
- // Helper methods
-
- private void setMockPropsAuthorizersConfig(String filePath, String authorizer = "managed-authorizer") {
- mockProperties.getProperty(NiFiRegistryProperties.WEB_HTTPS_PORT) >> "443"
- mockProperties.getSslPort() >> 443 // required to be non-null to create authorizer
- mockProperties.getProperty(NiFiRegistryProperties.SECURITY_AUTHORIZERS_CONFIGURATION_FILE) >> filePath
- mockProperties.getAuthorizersConfigurationFile() >> new File(filePath)
- mockProperties.getProperty(NiFiRegistryProperties.SECURITY_AUTHORIZER) >> authorizer
- }
-
- private static AuthorizationRequest getTestAuthorizationRequest() {
- return new AuthorizationRequest.Builder()
- .resource(ResourceFactory.getBucketsResource())
- .action(RequestAction.WRITE)
- .accessAttempt(false)
- .anonymous(true)
- .build()
- }
-
-}
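The deleted `AuthorizerFactorySpec`'s "invalid authorizer configuration fails" feature was table-driven via Spock's `where:` block. The same shape can be expressed as a JUnit 5 parameterized test; the sketch below is a hedged translation only (the test class name and the `newFactory` helper are illustrative, and it assumes `AuthorizerFactoryException` lives alongside `AuthorizerFactory` in the same package), not the actual replacement test:

```java
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.File;
import java.net.URL;
import java.util.HashSet;

import javax.sql.DataSource;

import org.apache.nifi.registry.extension.ExtensionClassLoader;
import org.apache.nifi.registry.extension.ExtensionManager;
import org.apache.nifi.registry.properties.NiFiRegistryProperties;
import org.apache.nifi.registry.security.authorization.AuthorizerFactory;
import org.apache.nifi.registry.security.authorization.AuthorizerFactoryException;
import org.apache.nifi.registry.security.identity.IdentityMapper;
import org.apache.nifi.registry.service.RegistryService;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.CsvSource;

class AuthorizerFactoryInvalidConfigTest {

    // One row per Spock data-table entry; a subset is shown here.
    @ParameterizedTest
    @CsvSource(delimiter = '|', value = {
            "src/test/resources/security/authorizers-good-file-providers.xml|non-existent-authorizer|could not be found",
            "src/test/resources/security/authorizers-bad-ug-provider-ids.xml|managed-authorizer|Duplicate User Group Provider identifier",
            "src/test/resources/security/authorizers-bad-authorizer-ids.xml|managed-authorizer|Duplicate Authorizer identifier"
    })
    void invalidConfigurationFails(final String configFile, final String authorizer, final String expectedFragment) {
        final AuthorizerFactory factory = newFactory(configFile, authorizer);

        final AuthorizerFactoryException e =
                assertThrows(AuthorizerFactoryException.class, factory::getAuthorizer);

        assertTrue(e.getMessage().contains(expectedFragment)
                || (e.getCause() != null && e.getCause().getMessage().contains(expectedFragment)));
    }

    // Translates the Spock setup() and setMockPropsAuthorizersConfig() stubbing
    // (">>") into Mockito when/thenReturn.
    private AuthorizerFactory newFactory(final String configFile, final String authorizer) {
        final NiFiRegistryProperties props = mock(NiFiRegistryProperties.class);
        when(props.getProperty(NiFiRegistryProperties.WEB_HTTPS_PORT)).thenReturn("443");
        when(props.getSslPort()).thenReturn(443);
        when(props.getProperty(NiFiRegistryProperties.SECURITY_AUTHORIZERS_CONFIGURATION_FILE)).thenReturn(configFile);
        when(props.getAuthorizersConfigurationFile()).thenReturn(new File(configFile));
        when(props.getProperty(NiFiRegistryProperties.SECURITY_AUTHORIZER)).thenReturn(authorizer);
        when(props.getPropertyKeys()).thenReturn(new HashSet<>());

        final ExtensionManager extensionManager = mock(ExtensionManager.class);
        when(extensionManager.getExtensionClassLoader(any()))
                .thenReturn(new ExtensionClassLoader("/tmp", new URL[0], getClass().getClassLoader()));

        return new AuthorizerFactory(props, extensionManager, null,
                mock(RegistryService.class), mock(DataSource.class), mock(IdentityMapper.class));
    }
}
```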
diff --git a/nifi-registry/nifi-registry-core/nifi-registry-framework/src/test/groovy/org/apache/nifi/registry/service/AuthorizationServiceSpec.groovy b/nifi-registry/nifi-registry-core/nifi-registry-framework/src/test/groovy/org/apache/nifi/registry/service/AuthorizationServiceSpec.groovy
deleted file mode 100644
index 8388262c9440..000000000000
--- a/nifi-registry/nifi-registry-core/nifi-registry-framework/src/test/groovy/org/apache/nifi/registry/service/AuthorizationServiceSpec.groovy
+++ /dev/null
@@ -1,630 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.registry.service
-
-import org.apache.nifi.registry.authorization.AccessPolicy
-import org.apache.nifi.registry.authorization.User
-import org.apache.nifi.registry.authorization.UserGroup
-import org.apache.nifi.registry.bucket.Bucket
-import org.apache.nifi.registry.exception.ResourceNotFoundException
-import org.apache.nifi.registry.security.authorization.*
-import org.apache.nifi.registry.security.authorization.AccessPolicy as AuthAccessPolicy
-import org.apache.nifi.registry.security.authorization.User as AuthUser
-import org.apache.nifi.registry.security.authorization.exception.AccessDeniedException
-import org.apache.nifi.registry.security.authorization.resource.Authorizable
-import org.apache.nifi.registry.security.authorization.resource.ResourceType
-import spock.lang.Specification
-
-class AuthorizationServiceSpec extends Specification {
-
- def registryService = Mock(RegistryService)
- def authorizableLookup = Mock(AuthorizableLookup)
- def userGroupProvider = Mock(ConfigurableUserGroupProvider)
- def accessPolicyProvider = Mock(ConfigurableAccessPolicyProvider)
-
- AuthorizationService authorizationService
-
- def setup() {
- accessPolicyProvider.getUserGroupProvider() >> userGroupProvider
- def standardAuthorizer = new StandardManagedAuthorizer(accessPolicyProvider, userGroupProvider)
- authorizationService = new AuthorizationService(authorizableLookup, standardAuthorizer, registryService)
- }
-
- // ----- User tests -------------------------------------------------------
-
- def "create user"() {
-
- setup:
- userGroupProvider.addUser(!null as AuthUser) >> {
- AuthUser u -> new AuthUser.Builder().identifier(u.identifier).identity(u.identity).build()
- }
- userGroupProvider.getGroups() >> new HashSet() // needed for converting user to DTO
- accessPolicyProvider.getAccessPolicies() >> new HashSet() // needed for converting user to DTO
-
- when: "new user is created successfully"
- def user = new User("id", "username")
- User createdUser = authorizationService.createUser(user)
-
- then: "created user has been assigned an identifier"
- with(createdUser) {
- identifier == "id"
- identity == "username"
- }
-
- }
-
- def "list users"() {
-
- setup:
- userGroupProvider.getUsers() >> [
- new AuthUser.Builder().identifier("user1").identity("username1").build(),
- new AuthUser.Builder().identifier("user2").identity("username2").build(),
- new AuthUser.Builder().identifier("user3").identity("username3").build(),
- ]
- userGroupProvider.getGroups() >> new HashSet()
- accessPolicyProvider.getAccessPolicies() >> new HashSet()
-
- when: "list of users is queried"
- def users = authorizationService.getUsers()
-
- then: "users are successfully returned as list of DTO objects"
- users != null
- users.size() == 3
- with(users[0]) {
- identifier == "user1"
- identity == "username1"
- }
- with(users[1]) {
- identifier == "user2"
- identity == "username2"
- }
- with(users[2]) {
- identifier == "user3"
- identity == "username3"
- }
-
- }
-
- def "get user"() {
-
- setup:
- def user1 = new AuthUser.Builder().identifier("user-id-1").identity("user1").build()
- def group1 = new Group.Builder().identifier("group-id-1").name("group1").addUser("user-id-1").build()
- def apBuilder = new org.apache.nifi.registry.security.authorization.AccessPolicy.Builder().resource("/fake-resource").action(RequestAction.READ)
- def ap1 = apBuilder.identifier("policy-1").addUser("user-id-1").build()
- def ap2 = apBuilder.identifier("policy-2").clearUsers().addGroup("group-id-1").build()
- def ap3 = apBuilder.identifier("policy-3").clearGroups().addGroup("does-not-exist").build()
- userGroupProvider.getUser("does-not-exist") >> null
- userGroupProvider.getUser("user-id-1") >> user1
- userGroupProvider.getGroup("group-id-1") >> group1
- userGroupProvider.getGroup("does-not-exist") >> null
- userGroupProvider.getGroups() >> new HashSet([group1])
- accessPolicyProvider.getAccessPolicies() >> new HashSet<>([ap1, ap2, ap3])
-
-
- when: "get user for existing user identifier"
- def userDto1 = authorizationService.getUser("user-id-1")
-
- then: "user is returned converted to DTO"
- with(userDto1) {
- identifier == "user-id-1"
- identity == "user1"
- userGroups.size() == 1
- userGroups[0].identifier == "group-id-1"
- accessPolicies.size() == 2
- accessPolicies.stream().noneMatch({it.identifier == "policy-3"})
- }
-
-
- when: "get user for non-existent tenant identifier"
- def user2 = authorizationService.getUser("does-not-exist")
-
- then: "no user is returned"
- thrown(ResourceNotFoundException.class)
-
- }
-
- def "update user"() {
-
- setup:
- userGroupProvider.updateUser(!null as AuthUser) >> {
- AuthUser u -> new AuthUser.Builder().identifier(u.identifier).identity(u.identity).build()
- }
- userGroupProvider.getGroups() >> new HashSet()
- accessPolicyProvider.getAccessPolicies() >> new HashSet()
-
-
- when: "user is updated"
- def user = authorizationService.updateUser(new User("userId", "username"))
-
- then: "updated user is returned"
- with(user) {
- identifier == "userId"
- identity == "username"
- }
-
- }
-
- def "delete user"() {
-
- setup:
- def user1 = new AuthUser.Builder().identifier("userId").identity("username").build()
- userGroupProvider.getUser("userId") >> user1
- userGroupProvider.deleteUser(user1) >> user1
- userGroupProvider.getGroups() >> new HashSet()
- accessPolicyProvider.getAccessPolicies() >> new HashSet()
-
-
- when: "user is deleted"
- def user = authorizationService.deleteUser("userId")
-
- then: "deleted user is returned converted to DTO"
- with(user) {
- identifier == "userId"
- identity == "username"
- }
-
- }
-
- // ----- User Group tests -------------------------------------------------
-
- def "create user group"() {
-
- setup:
- userGroupProvider.addGroup(!null as Group) >> {
- Group g -> new Group.Builder().identifier(g.identifier).name(g.name).build()
- }
- accessPolicyProvider.getAccessPolicies() >> new HashSet() // needed for converting to DTO
-
- when: "new group is created successfully"
- def group = new UserGroup("id", "groupName")
- UserGroup createdGroup = authorizationService.createUserGroup(group)
-
- then: "created group has been assigned an identifier"
- with(createdGroup) {
- identifier == "id"
- identity == "groupName"
- }
-
- }
-
- def "list user groups"() {
-
- setup:
- userGroupProvider.getGroups() >> [
- new Group.Builder().identifier("groupId1").name("groupName1").build(),
- new Group.Builder().identifier("groupId2").name("groupName2").build(),
- new Group.Builder().identifier("groupId3").name("groupName3").build(),
- ]
- accessPolicyProvider.getAccessPolicies() >> new HashSet()
-
- when: "list of groups is queried"
- def groups = authorizationService.getUserGroups()
-
- then: "groups are successfully returned as list of DTO objects"
- groups != null
- groups.size() == 3
- with(groups[0]) {
- identifier == "groupId1"
- identity == "groupName1"
- }
- with(groups[1]) {
- identifier == "groupId2"
- identity == "groupName2"
- }
- with(groups[2]) {
- identifier == "groupId3"
- identity == "groupName3"
- }
-
- }
-
- def "get user group"() {
-
- setup:
- accessPolicyProvider.getAccessPolicies() >> new HashSet()
-
-
- when: "get group for existing user identifier"
- userGroupProvider.getGroup("groupId") >> new Group.Builder().identifier("groupId").name ("groupName").build()
- def g1 = authorizationService.getUserGroup("groupId")
-
- then: "group is returned converted to DTO"
- with(g1) {
- identifier == "groupId"
- identity == "groupName"
- }
-
-
- when: "get group for non-existent group identifier"
- userGroupProvider.getUser("nonExistentId") >> null
- userGroupProvider.getGroup("nonExistentId") >> null
- def g2 = authorizationService.getUserGroup("nonExistentId")
-
- then: "no group is returned"
- thrown(ResourceNotFoundException.class)
-
- }
-
- def "update user group"() {
-
- setup:
- userGroupProvider.updateGroup(!null as Group) >> {
- Group g -> new Group.Builder().identifier(g.identifier).name(g.name).build()
- }
- accessPolicyProvider.getAccessPolicies() >> new HashSet()
-
-
- when: "group is updated"
- def group = authorizationService.updateUserGroup(new UserGroup("id", "name"))
-
- then: "updated group is returned converted to DTO"
- with(group) {
- identifier == "id"
- identity == "name"
- }
-
- }
-
- def "delete user group"() {
-
- setup:
- def group1 = new Group.Builder().identifier("id").name("name").build();
- userGroupProvider.getGroup("id") >> group1
- userGroupProvider.deleteGroup(group1) >> group1
- accessPolicyProvider.getAccessPolicies() >> new HashSet()
-
-
- when: "group is deleted"
- def group = authorizationService.deleteUserGroup("id")
-
- then: "deleted user is returned"
- with(group) {
- identifier == "id"
- identity == "name"
- }
-
- }
-
- // ----- Access Policy tests ----------------------------------------------
-
- def "create access policy"() {
-
- setup:
- accessPolicyProvider.addAccessPolicy(!null as AuthAccessPolicy) >> {
- AuthAccessPolicy p -> new AuthAccessPolicy.Builder()
- .identifier(p.identifier)
- .resource(p.resource)
- .action(p.action)
- .addGroups(p.groups)
- .addUsers(p.users)
- .build()
- }
- accessPolicyProvider.isConfigurable(_ as AuthAccessPolicy) >> true
-
-
- when: "new access policy is created successfully"
- def accessPolicy = new AccessPolicy([resource: "/resource", action: "read"])
- accessPolicy.setIdentifier("id")
-
- def createdPolicy = authorizationService.createAccessPolicy(accessPolicy)
-
- then: "created policy has been assigned an identifier"
- with(createdPolicy) {
- identifier == "id"
- resource == "/resource"
- action == "read"
- configurable == true
- }
-
- }
-
- def "list access policies"() {
-
- setup:
- accessPolicyProvider.getAccessPolicies() >> [
- new AuthAccessPolicy.Builder().identifier("ap1").resource("r1").action(RequestAction.READ).build(),
- new AuthAccessPolicy.Builder().identifier("ap2").resource("r2").action(RequestAction.WRITE).build()
- ]
-
- when: "list access polices is queried"
- def policies = authorizationService.getAccessPolicies()
-
- then: "access policies are successfully returned as list of DTO objects"
- policies != null
- policies.size() == 2
- with(policies[0]) {
- identifier == "ap1"
- resource == "r1"
- action == RequestAction.READ.toString()
- }
- with(policies[1]) {
- identifier == "ap2"
- resource == "r2"
- action == RequestAction.WRITE.toString()
- }
-
- }
-
- def "get access policy"() {
-
- when: "get policy for existing identifier"
- accessPolicyProvider.getAccessPolicy("id") >> new AuthAccessPolicy.Builder()
- .identifier("id")
- .resource("/resource")
- .action(RequestAction.READ)
- .build()
- def p1 = authorizationService.getAccessPolicy("id")
-
- then: "policy is returned converted to DTO"
- with(p1) {
- identifier == "id"
- resource == "/resource"
- action == RequestAction.READ.toString()
- }
-
-
- when: "get policy for non-existent identifier"
- accessPolicyProvider.getAccessPolicy("nonExistentId") >> null
- def p2 = authorizationService.getAccessPolicy("nonExistentId")
-
- then: "no policy is returned"
- thrown(ResourceNotFoundException.class)
-
- }
-
- def "update access policy"() {
-
- setup:
- def users = [
- "user1": "alice",
- "user2": "bob",
- "user3": "charlie" ]
- def groups = [
- "group1": "users",
- "group2": "devs",
- "group3": "admins" ]
- def policies = [
- "policy1": [
- "resource": "/resource1",
- "action": "read",
- "users": [ "user1" ],
- "groups": []
- ]
- ]
- def mapDtoUser = { String id -> new User(id, users[id])}
- def mapDtoGroup = { String id -> new UserGroup(id, groups[id])}
- def mapAuthUser = { String id -> new AuthUser.Builder().identifier(id).identity(users[id]).build() }
- def mapAuthGroup = { String id -> new Group.Builder().identifier(id).name(groups[id]).build() }
- def mapAuthAccessPolicy = {
- String id -> return new AuthAccessPolicy.Builder()
- .identifier(id)
- .resource(policies[id]["resource"] as String)
- .action(RequestAction.valueOfValue(policies[id]["action"] as String))
- .addUsers(policies[id]["users"] as Set)
- .addGroups(policies[id]["groups"] as Set)
- .build()
- }
- userGroupProvider.getUser(!null as String) >> { String id -> users.containsKey(id) ? mapAuthUser(id) : null }
- userGroupProvider.getGroup(!null as String) >> { String id -> groups.containsKey(id) ? mapAuthGroup(id) : null }
- userGroupProvider.getUsers() >> {
- def authUsers = []
- users.each{ k, v -> authUsers.add(new AuthUser.Builder().identifier(k).identity(v).build()) }
- return authUsers
- }
- userGroupProvider.getGroups() >> {
- def authGroups = []
- users.each{ k, v -> authGroups.add(new Group.Builder().identifier(k).name(v).build()) }
- return authGroups
- }
- accessPolicyProvider.getAccessPolicy(!null as String) >> { String id -> policies.containsKey(id) ? mapAuthAccessPolicy(id) : null }
- accessPolicyProvider.updateAccessPolicy(!null as AuthAccessPolicy) >> {
- AuthAccessPolicy p -> new AuthAccessPolicy.Builder()
- .identifier(p.identifier)
- .resource(p.resource)
- .action(p.action)
- .addGroups(p.groups)
- .addUsers(p.users)
- .build()
- }
- accessPolicyProvider.isConfigurable(_ as AuthAccessPolicy) >> true
-
-
- when: "policy is updated"
- def policy = new AccessPolicy([identifier: "policy1", resource: "/resource1", action: "read"])
- policy.addUsers([mapDtoUser("user1"), mapDtoUser("user2")])
- policy.addUserGroups([mapDtoGroup("group1")])
- def p1 = authorizationService.updateAccessPolicy(policy)
-
- then: "updated group is returned converted to DTO"
- p1 != null
- p1.users.size() == 2
- def sortedUsers = p1.users.sort{it.identifier}
- with(sortedUsers[0]) {
- identifier == "user1"
- identity == "alice"
- }
- with(sortedUsers[1]) {
- identifier == "user2"
- identity == "bob"
- }
- p1.userGroups.size() == 1
- with(p1.userGroups[0]) {
- identifier == "group1"
- identity == "users"
- }
-
-
- when: "attempt to change policy resource and action"
- def p2 = authorizationService.updateAccessPolicy(new AccessPolicy([identifier: "policy1", resource: "/newResource", action: "write"]))
-
- then: "resource and action are unchanged"
- with(p2) {
- identifier == "policy1"
- resource == "/resource1"
- action == "read"
- }
-
- }
-
- def "delete access policy"() {
-
- setup:
- def policy1 = new AuthAccessPolicy.Builder()
- .identifier("policy1")
- .resource("/resource")
- .action(RequestAction.READ)
- .addGroups(new HashSet())
- .addUsers(new HashSet())
- .build()
-
- userGroupProvider.getGroups() >> new HashSet()
- userGroupProvider.getUsers() >> new HashSet()
- accessPolicyProvider.getAccessPolicy("id") >> policy1
- accessPolicyProvider.deleteAccessPolicy(!null as String) >> policy1
-
- when: "access policy is deleted"
- def policy = authorizationService.deleteAccessPolicy("id")
-
- then: "deleted policy is returned"
- with(policy) {
- identifier == "policy1"
- resource == "/resource"
- action == RequestAction.READ.toString()
- }
-
- }
-
- // ----- Resource tests ---------------------------------------------------
-
- def "get resources"() {
-
- setup:
- def buckets = [
- "b1": [
- "name": "Bucket #1",
- "description": "An initial bucket for testing",
- "createdTimestamp": 1
- ],
- "b2": [
- "name": "Bucket #2",
- "description": "A second bucket for testing",
- "createdTimestamp": 2
- ],
- ]
- def mapBucket = {
- String id -> new Bucket([
- identifier: id,
- name: buckets[id]["name"] as String,
- description: buckets[id]["description"] as String]) }
-
- registryService.getBuckets() >> {[ mapBucket("b1"), mapBucket("b2") ]}
-
- when:
- def resources = authorizationService.getResources()
-
- then:
- resources != null
- resources.size() == 8
- def sortedResources = resources.sort{it.identifier}
- sortedResources[0].identifier == "/actuator"
- sortedResources[1].identifier == "/buckets"
- sortedResources[2].identifier == "/buckets/b1"
- sortedResources[3].identifier == "/buckets/b2"
- sortedResources[4].identifier == "/policies"
- sortedResources[5].identifier == "/proxy"
- sortedResources[6].identifier == "/swagger"
- sortedResources[7].identifier == "/tenants"
-
- }
-
- def "get authorized resources"() {
-
- setup:
- def buckets = [
- "b1": [
- "name": "Bucket #1",
- "description": "An initial bucket for testing",
- "createdTimestamp": 1,
- "allowPublicRead" : false
- ],
- "b2": [
- "name": "Bucket #2",
- "description": "A second bucket for testing",
- "createdTimestamp": 2,
- "allowPublicRead" : true
- ],
- "b3": [
- "name": "Bucket #3",
- "description": "A third bucket for testing",
- "createdTimestamp": 3,
- "allowPublicRead" : false
- ]
- ]
- def mapBucket = {
- String id -> new Bucket([
- identifier: id,
- name: buckets[id]["name"] as String,
- description: buckets[id]["description"] as String,
- allowPublicRead: buckets[id]["allowPublicRead"]
- ]) }
-
- registryService.getBuckets() >> {[ mapBucket("b1"), mapBucket("b2"), mapBucket("b3") ]}
-
- def authorized = Mock(Authorizable)
- authorized.authorize(_, _, _) >> { return }
- def denied = Mock(Authorizable)
- denied.authorize(_, _, _) >> { throw new AccessDeniedException("") }
-
- authorizableLookup.getAuthorizableByResource("/actuator") >> denied
- authorizableLookup.getAuthorizableByResource("/buckets") >> authorized
- authorizableLookup.getAuthorizableByResource("/buckets/b1") >> authorized
- authorizableLookup.getAuthorizableByResource("/buckets/b2") >> authorized
- authorizableLookup.getAuthorizableByResource("/buckets/b3") >> denied
- authorizableLookup.getAuthorizableByResource("/policies") >> authorized
- authorizableLookup.getAuthorizableByResource("/proxy") >> denied
- authorizableLookup.getAuthorizableByResource("/swagger") >> denied
- authorizableLookup.getAuthorizableByResource("/tenants") >> authorized
-
-
- when:
- def resources = authorizationService.getAuthorizedResources(RequestAction.READ)
-
- then:
- resources != null
- resources.size() == 5
- def sortedResources = resources.sort{it.identifier}
- sortedResources[0].identifier == "/buckets"
- sortedResources[1].identifier == "/buckets/b1"
- sortedResources[2].identifier == "/buckets/b2"
- sortedResources[3].identifier == "/policies"
- sortedResources[4].identifier == "/tenants"
-
-
- when:
- def filteredResources = authorizationService.getAuthorizedResources(RequestAction.READ, ResourceType.Bucket)
-
- then:
- filteredResources != null
- filteredResources.size() == 3
- def sortedFilteredResources = filteredResources.sort{it.identifier}
- sortedFilteredResources[0].identifier == "/buckets"
- sortedFilteredResources[1].identifier == "/buckets/b1"
- sortedFilteredResources[2].identifier == "/buckets/b2"
- }
-
-}
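Throughout the deleted `AuthorizationServiceSpec`, collaborators were Spock mocks stubbed with `>>` closures (for example `userGroupProvider.addUser(!null as AuthUser) >> { ... }`). As a reference for the migration, here is a hedged JUnit 5/Mockito sketch of the "create user" feature only; the class name is illustrative and the real replacement test may be organized differently:

```java
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.ArgumentMatchers.notNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.HashSet;

import org.apache.nifi.registry.authorization.User;
import org.apache.nifi.registry.security.authorization.AuthorizableLookup;
import org.apache.nifi.registry.security.authorization.ConfigurableAccessPolicyProvider;
import org.apache.nifi.registry.security.authorization.ConfigurableUserGroupProvider;
import org.apache.nifi.registry.security.authorization.StandardManagedAuthorizer;
import org.apache.nifi.registry.service.AuthorizationService;
import org.apache.nifi.registry.service.RegistryService;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

class AuthorizationServiceCreateUserTest {

    private ConfigurableUserGroupProvider userGroupProvider;
    private ConfigurableAccessPolicyProvider accessPolicyProvider;
    private AuthorizationService authorizationService;

    @BeforeEach
    void setup() {
        userGroupProvider = mock(ConfigurableUserGroupProvider.class);
        accessPolicyProvider = mock(ConfigurableAccessPolicyProvider.class);
        when(accessPolicyProvider.getUserGroupProvider()).thenReturn(userGroupProvider);
        authorizationService = new AuthorizationService(
                mock(AuthorizableLookup.class),
                new StandardManagedAuthorizer(accessPolicyProvider, userGroupProvider),
                mock(RegistryService.class));
    }

    @Test
    void createUser() {
        // Spock's ">> { AuthUser u -> ... }" becomes a Mockito Answer that
        // echoes the submitted authorization-model User back unchanged.
        // (The spec aliased this type as AuthUser to avoid clashing with the DTO.)
        when(userGroupProvider.addUser(notNull())).thenAnswer(invocation -> {
            final org.apache.nifi.registry.security.authorization.User u = invocation.getArgument(0);
            return new org.apache.nifi.registry.security.authorization.User.Builder()
                    .identifier(u.getIdentifier()).identity(u.getIdentity()).build();
        });
        when(userGroupProvider.getGroups()).thenReturn(new HashSet<>());          // needed for DTO conversion
        when(accessPolicyProvider.getAccessPolicies()).thenReturn(new HashSet<>()); // needed for DTO conversion

        final User created = authorizationService.createUser(new User("id", "username"));

        assertEquals("id", created.getIdentifier());
        assertEquals("username", created.getIdentity());
    }
}
```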
diff --git a/nifi-registry/nifi-registry-core/nifi-registry-web-api/pom.xml b/nifi-registry/nifi-registry-core/nifi-registry-web-api/pom.xml
index 170c76b5bcd9..3247d7525005 100644
--- a/nifi-registry/nifi-registry-core/nifi-registry-web-api/pom.xml
+++ b/nifi-registry/nifi-registry-core/nifi-registry-web-api/pom.xml
@@ -395,23 +395,6 @@
             <version>6.0.9</version>
             <scope>test</scope>
         </dependency>
-        <dependency>
-            <groupId>org.spockframework</groupId>
-            <artifactId>spock-core</artifactId>
-            <scope>test</scope>
-            <exclusions>
-                <exclusion>
-                    <groupId>org.codehaus.groovy</groupId>
-                    <artifactId>*</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
-        <dependency>
-            <groupId>cglib</groupId>
-            <artifactId>cglib-nodep</artifactId>
-            <version>2.2.2</version>
-            <scope>test</scope>
-        </dependency>
         <dependency>
             <groupId>org.eclipse.jetty</groupId>
             <artifactId>jetty-util</artifactId>
@@ -426,18 +409,6 @@
             <groupId>com.google.guava</groupId>
             <artifactId>guava</artifactId>
         </dependency>
-        <dependency>
-            <groupId>org.codehaus.groovy</groupId>
-            <artifactId>groovy-json</artifactId>
-            <version>2.5.18</version>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.codehaus.groovy</groupId>
-            <artifactId>groovy</artifactId>
-            <version>2.5.18</version>
-            <scope>test</scope>
-        </dependency>
         <dependency>
             <groupId>org.springframework.security</groupId>
             <artifactId>spring-security-oauth2-resource-server</artifactId>
diff --git a/nifi-registry/pom.xml b/nifi-registry/pom.xml
index 583e7c97971b..f89c6138968f 100644
--- a/nifi-registry/pom.xml
+++ b/nifi-registry/pom.xml
@@ -176,31 +176,6 @@
             <scope>runtime</scope>
             <type>zip</type>
         </dependency>
-
-        <dependency>
-            <groupId>org.spockframework</groupId>
-            <artifactId>spock-core</artifactId>
-            <version>2.3-groovy-3.0</version>
-            <scope>test</scope>
-            <exclusions>
-                <exclusion>
-                    <groupId>org.codehaus.groovy</groupId>
-                    <artifactId>*</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
-        <dependency>
-            <groupId>org.codehaus.groovy</groupId>
-            <artifactId>groovy-test</artifactId>
-            <version>${nifi.groovy.version}</version>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.codehaus.groovy</groupId>
-            <artifactId>groovy-cli-commons</artifactId>
-            <version>${nifi.groovy.version}</version>
-            <scope>test</scope>
-        </dependency>
         <dependency>
             <groupId>org.testcontainers</groupId>
             <artifactId>mysql</artifactId>
@@ -236,12 +211,6 @@
-                <plugin>
-                    <groupId>org.codehaus.groovy</groupId>
-                    <artifactId>groovy-eclipse-compiler</artifactId>
-                    <version>${groovy.eclipse.compiler.version}</version>
-                    <extensions>true</extensions>
-                </plugin>
                 <plugin>
                     <groupId>org.apache.maven.plugins</groupId>
                     <artifactId>maven-war-plugin</artifactId>
@@ -304,38 +273,6 @@
-                <plugin>
-                    <groupId>org.apache.maven.plugins</groupId>
-                    <artifactId>maven-compiler-plugin</artifactId>
-                    <executions>
-                        <execution>
-                            <id>groovy-tests</id>
-                            <goals>
-                                <goal>testCompile</goal>
-                            </goals>
-                            <configuration>
-                                <compilerId>groovy-eclipse-compiler</compilerId>
-                            </configuration>
-                        </execution>
-                    </executions>
-                    <configuration>
-                        <source>${maven.compiler.source}</source>
-                        <target>${maven.compiler.target}</target>
-                    </configuration>
-                    <dependencies>
-                        <dependency>
-                            <groupId>org.codehaus.groovy</groupId>
-                            <artifactId>groovy-eclipse-compiler</artifactId>
-                            <version>${groovy.eclipse.compiler.version}</version>
-                        </dependency>
-                        <dependency>
-                            <groupId>org.codehaus.groovy</groupId>
-                            <artifactId>groovy-eclipse-batch</artifactId>
-                            <version>${groovy.eclipse.batch.version}</version>
-                        </dependency>
-                    </dependencies>
-                </plugin>
                 <plugin>
                     <groupId>org.apache.maven.plugins</groupId>
                     <artifactId>maven-failsafe-plugin</artifactId>
diff --git a/pom.xml b/pom.xml
index 9c65242bfcc7..6912a8be77cc 100644
--- a/pom.xml
+++ b/pom.xml
@@ -360,20 +360,6 @@
                <version>${nifi.groovy.version}</version>
                <scope>test</scope>
            </dependency>
-            <dependency>
-                <groupId>org.spockframework</groupId>
-                <artifactId>spock-core</artifactId>
-                <version>2.3-groovy-3.0</version>
-                <scope>test</scope>
-                <exclusions>
-                    <exclusion>
-                        <groupId>org.codehaus.groovy</groupId>
-                        <artifactId>*</artifactId>
-                    </exclusion>
-                </exclusions>
-            </dependency>
            <dependency>
                <groupId>org.hamcrest</groupId>
                <artifactId>hamcrest-all</artifactId>
@@ -1046,6 +1032,8 @@
                                         <exclude>org.bouncycastle:bcmail-jdk15on</exclude>
                                         <exclude>org.apache.sshd:*:[,2.9.1]</exclude>
+
+                                        <exclude>org.spockframework:*</exclude>