From ae1c165c0e0c82643e0c1c2c3f4c112f0f8965c3 Mon Sep 17 00:00:00 2001
From: Stanislav Knot
Date: Wed, 17 Jul 2019 13:04:43 +0200
Subject: [PATCH] Replace findbugs by spotbugs (#1761)

* Replace findbugs by spotbugs
* comments
* rebase+fixup
* spotbugs bamboozling
* disable java11 :(
* spotbugs workaround
* comment
* fix?
* rebase!
* rebase
---
 .../spotbugs-exclude.xml                      |  4 +-
 .travis/build.sh                              |  4 +-
 Makefile                                      |  4 +-
 Makefile.maven                                |  4 +-
 cluster-operator/pom.xml                      | 10 +--
 .../io/strimzi/operator/cluster/Main.java     |  2 +
 .../operator/cluster/model/AbstractModel.java |  8 +-
 .../assembly/KafkaAssemblyOperator.java       |  2 +-
 docker-images/Makefile                        |  2 +-
 examples/Makefile                             |  2 +-
 helm-charts/Makefile                          |  2 +-
 install/Makefile                              |  2 +-
 metrics/Makefile                              |  2 +-
 pom.xml                                       | 32 ++++----
 systemtest/pom.xml                            | 10 +++
 .../strimzi/systemtest/AbstractResources.java |  9 ++-
 .../io/strimzi/systemtest/AbstractST.java     | 20 ++++-
 .../strimzi/systemtest/HttpBridgeBaseST.java  |  4 +-
 .../java/io/strimzi/systemtest/Resources.java |  3 +-
 .../clients/api/VerifiableClient.java         |  1 +
 .../clients/lib/KafkaClientProperties.java    |  6 +-
 .../systemtest/utils/LogCollector.java        |  2 +
 .../io/strimzi/systemtest/utils/StUtils.java  | 61 ++++++++-----
 .../io/strimzi/systemtest/AllNamespaceST.java |  8 +-
 .../io/strimzi/systemtest/ConnectS2IST.java   |  6 +-
 .../java/io/strimzi/systemtest/ConnectST.java |  4 +-
 .../systemtest/CustomResourceStatusST.java    | 12 +--
 .../java/io/strimzi/systemtest/KafkaST.java   | 68 ++++++++--------
 .../io/strimzi/systemtest/LogSettingST.java   | 24 +++---
 .../io/strimzi/systemtest/MirrorMakerST.java  | 16 ++--
 .../systemtest/MultipleNamespaceST.java       |  4 +-
 .../io/strimzi/systemtest/RecoveryST.java     | 78 +++++++++----------
 .../strimzi/systemtest/RollingUpdateST.java   | 14 ++--
 .../io/strimzi/systemtest/SecurityST.java     |  2 +-
 .../java/io/strimzi/systemtest/UserST.java    |  2 +-
 .../systemtest/bridge/HttpBridgeST.java       |  8 +-
 .../bridge/HttpBridgeScramShaST.java          | 10 +--
 .../systemtest/bridge/HttpBridgeTlsST.java    | 10 +--
 .../test_client/HttpClientsListener.java      |  3 +
 topic-operator/pom.xml                        |  4 +
 .../topic/OperatorAssignedKafkaImpl.java      |  8 +-
 user-operator/pom.xml                         |  4 +
 .../java/io/strimzi/operator/user/Main.java   |  2 +
 43 files changed, 283 insertions(+), 200 deletions(-)
 rename .findbugs/findbugs-exclude.xml => .spotbugs/spotbugs-exclude.xml (88%)

diff --git a/.findbugs/findbugs-exclude.xml b/.spotbugs/spotbugs-exclude.xml
similarity index 88%
rename from .findbugs/findbugs-exclude.xml
rename to .spotbugs/spotbugs-exclude.xml
index ff54ac57a96..6c0e6bae2ae 100644
--- a/.findbugs/findbugs-exclude.xml
+++ b/.spotbugs/spotbugs-exclude.xml
@@ -1,5 +1,8 @@
+
+
+
@@ -16,5 +19,4 @@
-
\ No newline at end of file

diff --git a/.travis/build.sh b/.travis/build.sh
index 05c1f074f4c..3dd0b826c27 100755
--- a/.travis/build.sh
+++ b/.travis/build.sh
@@ -20,9 +20,7 @@ export DOCKER_REGISTRY=${DOCKER_REGISTRY:-docker.io}
 export DOCKER_TAG=$COMMIT
 
 make docu_check
-if [ "${MAIN_BUILD}" = "TRUE" ] ; then
-  make findbugs
-fi
+make spotbugs
 
 make crd_install
 make helm_install

diff --git a/Makefile b/Makefile
index 1ce9c1aa6e3..4baeaf54865 100644
--- a/Makefile
+++ b/Makefile
@@ -114,7 +114,7 @@ docu_htmlnoheader: docu_htmlnoheaderclean docu_versions docu_check
 docu_check:
 	./.travis/check_docs.sh
 
-findbugs: $(SUBDIRS)
+spotbugs: $(SUBDIRS)
 
 docu_pushtowebsite: docu_htmlnoheader docu_html
 	./.travis/docu-push-to-website.sh
@@ -145,4 +145,4 @@ crd_install: install
 $(SUBDIRS):
 	$(MAKE) -C $@ $(MAKECMDGOALS)
 
-.PHONY: all $(SUBDIRS) $(DOCKER_TARGETS) systemtests docu_versions findbugs docu_check
+.PHONY: all $(SUBDIRS) $(DOCKER_TARGETS) systemtests docu_versions spotbugs docu_check

diff --git a/Makefile.maven b/Makefile.maven
index b2a3e324999..517413e8b6e 100644
--- a/Makefile.maven
+++ b/Makefile.maven
@@ -18,5 +18,5 @@ java_clean:
 	echo "Cleaning Maven build ..."
 	mvn clean
 
-findbugs:
-	mvn $(MVN_ARGS) org.codehaus.mojo:findbugs-maven-plugin:check
+spotbugs:
+	mvn $(MVN_ARGS) spotbugs:check

diff --git a/cluster-operator/pom.xml b/cluster-operator/pom.xml
index a6caa9105c9..ab11e3cb012 100644
--- a/cluster-operator/pom.xml
+++ b/cluster-operator/pom.xml
@@ -17,12 +17,6 @@
-        <dependency>
-            <groupId>com.google.code.findbugs</groupId>
-            <artifactId>findbugs-annotations</artifactId>
-            <version>3.0.1</version>
-            <scope>provided</scope>
-        </dependency>
         <dependency>
             <groupId>io.strimzi</groupId>
             <artifactId>api</artifactId>
@@ -121,6 +115,10 @@
             <type>test-jar</type>
             <scope>test</scope>
+        <dependency>
+            <groupId>com.github.spotbugs</groupId>
+            <artifactId>spotbugs-annotations</artifactId>
+        </dependency>

diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/Main.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/Main.java
index 35cac4a240c..5cd22d41d33 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/Main.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/Main.java
@@ -4,6 +4,7 @@
  */
 package io.strimzi.operator.cluster;
 
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import io.fabric8.kubernetes.api.model.rbac.ClusterRole;
 import io.fabric8.kubernetes.client.DefaultKubernetesClient;
 import io.fabric8.kubernetes.client.KubernetesClient;
@@ -33,6 +34,7 @@
 import java.util.Map;
 import java.util.stream.Collectors;
 
+@SuppressFBWarnings("DM_EXIT")
 public class Main {
     private static final Logger log = LogManager.getLogger(Main.class.getName());

diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractModel.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractModel.java
index 466286c5f3a..5a978e753e3 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractModel.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractModel.java
@@ -4,7 +4,6 @@
  */
 package io.strimzi.operator.cluster.model;
 
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import io.fabric8.kubernetes.api.model.Affinity;
 import io.fabric8.kubernetes.api.model.ConfigMap;
 import io.fabric8.kubernetes.api.model.ConfigMapBuilder;
@@ -291,7 +290,6 @@ protected OrderedProperties getDefaultLogConfig() {
      * @param configFileName The filename
      * @return The OrderedProperties
      */
-    @SuppressFBWarnings("OBL_UNSATISFIED_OBLIGATION") // InputStream is closed by properties.addStringPairs
     public static OrderedProperties getOrderedProperties(String configFileName) {
         OrderedProperties properties = new OrderedProperties();
         if (configFileName != null && !configFileName.isEmpty()) {
@@ -303,6 +301,12 @@ public static OrderedProperties getOrderedProperties(String configFileName) {
                 properties.addStringPairs(is);
             } catch (IOException e) {
                 log.warn("Unable to read default log config from '{}'", configFileName);
+            } finally {
+                try {
+                    is.close();
+                } catch (IOException e) {
Reason: " + e.getMessage()); + } } } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperator.java index 8eb2d9451eb..3e9e6618dcd 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperator.java @@ -1152,7 +1152,7 @@ Future zkRollingUpdate() { */ Future zkScaleUpStep() { Future futss = zkSetOperations.getAsync(namespace, ZookeeperCluster.zookeeperClusterName(name)); - return withVoid(futss.map(ss -> ss == null ? 0 : ss.getSpec().getReplicas()) + return withVoid(futss.map(ss -> ss == null ? Integer.valueOf(0) : ss.getSpec().getReplicas()) .compose(currentReplicas -> { if (currentReplicas > 0 && zkCluster.getReplicas() > currentReplicas) { zkCluster.setReplicas(currentReplicas + 1); diff --git a/docker-images/Makefile b/docker-images/Makefile index 0e761854310..bd495ed9b89 100644 --- a/docker-images/Makefile +++ b/docker-images/Makefile @@ -19,5 +19,5 @@ docker_push: all: docker_build docker_push -.PHONY: build clean release all $(SUBDIRS) $(DOCKER_TARGETS) findbugs +.PHONY: build clean release all $(SUBDIRS) $(DOCKER_TARGETS) spotbugs diff --git a/examples/Makefile b/examples/Makefile index 44f501c8a6f..ce3bf3bda50 100644 --- a/examples/Makefile +++ b/examples/Makefile @@ -15,4 +15,4 @@ release: $(CP) -r ./user $(RELEASE_PATH)/ $(CP) -r ./topic $(RELEASE_PATH)/ -.PHONY: all build clean docker_build docker_push docker_tag findbugs +.PHONY: all build clean docker_build docker_push docker_tag spotbugs diff --git a/helm-charts/Makefile b/helm-charts/Makefile index 571f9254c24..e23e209de4c 100644 --- a/helm-charts/Makefile +++ b/helm-charts/Makefile @@ -62,4 +62,4 @@ docker_push: all: docker_build clean: helm_clean -.PHONY: build clean release findbugs +.PHONY: build clean release spotbugs diff --git a/install/Makefile b/install/Makefile index f7751d76fce..2ef848b3c21 100644 --- a/install/Makefile +++ b/install/Makefile @@ -43,4 +43,4 @@ release: $(CP) -r ./topic-operator $(RELEASE_PATH)/ $(CP) -r ./strimzi-admin $(RELEASE_PATH)/ -.PHONY: all build clean docker_build docker_push docker_tag findbugs +.PHONY: all build clean docker_build docker_push docker_tag spotbugs diff --git a/metrics/Makefile b/metrics/Makefile index 5ac979fe8df..f1b982313ef 100644 --- a/metrics/Makefile +++ b/metrics/Makefile @@ -17,4 +17,4 @@ release: $(CP) -r ./examples/prometheus/additional-properties/*.yaml $(RELEASE_PATH)/prometheus-additional-properties/ $(CP) -r ./examples/prometheus/alertmanager-config/*.yaml $(RELEASE_PATH)/prometheus-alertmanager-config/ -.PHONY: all build clean docker_build docker_push docker_tag findbugs \ No newline at end of file +.PHONY: all build clean docker_build docker_push docker_tag spotbugs \ No newline at end of file diff --git a/pom.xml b/pom.xml index 5a179c175f5..64a5eb95b80 100644 --- a/pom.xml +++ b/pom.xml @@ -64,7 +64,7 @@ 3.1.1 1.6 3.0.0 - 3.0.5 + 3.1.12 1.6.3 0.7.9 2.11 @@ -263,10 +263,6 @@ org.slf4j slf4j-log4j12 - - com.github.spotbugs - spotbugs-annotations - @@ -384,6 +380,11 @@ ${gson.version} test + + com.github.spotbugs + spotbugs-annotations + ${spotbugs.version} + @@ -588,24 +589,25 @@ - org.codehaus.mojo - findbugs-maven-plugin - ${findbugs.version} + com.github.spotbugs + spotbugs-maven-plugin + 3.1.12 + + + com.github.spotbugs + spotbugs + 
${spotbugs.version} + - Max Low true - ${project.build.directory}/findbugs + ${project.build.directory}/spotbugs - ${project.basedir}/../.findbugs/findbugs-exclude.xml + ${project.basedir}/../.spotbugs/spotbugs-exclude.xml diff --git a/systemtest/pom.xml b/systemtest/pom.xml index 267ed7553ec..c97bb9d6327 100644 --- a/systemtest/pom.xml +++ b/systemtest/pom.xml @@ -144,6 +144,16 @@ ${vertx.version} compile + + + com.squareup.okhttp3 + okhttp + + + + com.github.spotbugs + spotbugs-annotations + diff --git a/systemtest/src/main/java/io/strimzi/systemtest/AbstractResources.java b/systemtest/src/main/java/io/strimzi/systemtest/AbstractResources.java index 8bcba2860e3..b4775ba33f3 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/AbstractResources.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/AbstractResources.java @@ -23,6 +23,7 @@ import io.fabric8.kubernetes.api.model.rbac.RoleBinding; import io.fabric8.kubernetes.api.model.rbac.RoleBindingList; import io.fabric8.kubernetes.client.BaseClient; +import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.dsl.MixedOperation; import io.fabric8.kubernetes.client.dsl.Resource; import io.fabric8.kubernetes.client.dsl.internal.CustomResourceOperationContext; @@ -50,6 +51,7 @@ import io.strimzi.api.kafka.model.KafkaTopic; import io.strimzi.api.kafka.model.KafkaUser; import io.strimzi.test.k8s.KubeClient; +import okhttp3.OkHttpClient; abstract class AbstractResources { @@ -71,7 +73,12 @@ public MixedOperation> MixedOperation> customResourcesWithCascading(Class resourceType, Class listClass, Class doneClass) { - CustomResourceOperationContext croc = new CustomResourceOperationContext().withOkhttpClient(((BaseClient) client().getClient()).getHttpClient()).withConfig(client().getClient().getConfiguration()) + OkHttpClient httpClient = null; + KubernetesClient kubernetesClient = client().getClient(); + if (kubernetesClient instanceof BaseClient) { + httpClient = ((BaseClient) kubernetesClient).getHttpClient(); + } + CustomResourceOperationContext croc = new CustomResourceOperationContext().withOkhttpClient(httpClient).withConfig(client().getClient().getConfiguration()) .withApiGroupName(Crds.kafka().getSpec().getGroup()) .withApiGroupVersion(Crds.kafka().getSpec().getVersion()) .withNamespace(client().getNamespace()) diff --git a/systemtest/src/main/java/io/strimzi/systemtest/AbstractST.java b/systemtest/src/main/java/io/strimzi/systemtest/AbstractST.java index d97bdb12d13..b66515913e4 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/AbstractST.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/AbstractST.java @@ -113,8 +113,8 @@ public abstract class AbstractST extends BaseITST implements TestSeparator { public static final String TEST_LOG_DIR = Environment.TEST_LOG_DIR; protected Resources testMethodResources; - protected static Resources testClassResources; - protected static String operationID; + private static Resources testClassResources; + private static String operationID; Random rng = new Random(); protected HelmClient helmClient() { @@ -437,10 +437,22 @@ public Resources testMethodResources() { return testMethodResources; } - public Resources testClassResources() { + public static String getOperationID() { + return operationID; + } + + public static void setOperationID(String operationID) { + AbstractST.operationID = operationID; + } + + public static Resources getTestClassResources() { return testClassResources; } + public static void setTestClassResources(Resources 
+        AbstractST.testClassResources = testClassResources;
+    }
+
     String startTimeMeasuring(Operation operation) {
         TimeMeasuringSystem.setTestName(testClass, testName);
         return TimeMeasuringSystem.startOperation(operation);
@@ -688,7 +700,7 @@ protected void recreateTestEnv(String coNamespace, List bindingsNamespac
         createNamespaces(coNamespace, bindingsNamespaces);
         applyClusterOperatorInstallFiles();
 
-        testClassResources = new Resources(kubeClient());
+        setTestClassResources(new Resources(kubeClient()));
 
         applyRoleBindings(coNamespace, bindingsNamespaces);
         // 050-Deployment

diff --git a/systemtest/src/main/java/io/strimzi/systemtest/HttpBridgeBaseST.java b/systemtest/src/main/java/io/strimzi/systemtest/HttpBridgeBaseST.java
index e1fd79663b9..2c4adb08140 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/HttpBridgeBaseST.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/HttpBridgeBaseST.java
@@ -179,7 +179,7 @@ protected void deployBridgeNodePortService() throws InterruptedException {
             .withType("NodePort")
             .withSelector(map)
             .endSpec().build();
-        testClassResources.createServiceResource(service, getBridgeNamespace()).done();
+        getTestClassResources().createServiceResource(service, getBridgeNamespace()).done();
         StUtils.waitForNodePortService(bridgeExternalService);
     }
 
@@ -220,6 +220,6 @@ void deployClusterOperator() {
         createTestClassResources();
         applyRoleBindings(getBridgeNamespace());
         // 050-Deployment
-        testClassResources.clusterOperator(getBridgeNamespace()).done();
+        getTestClassResources().clusterOperator(getBridgeNamespace()).done();
     }
 }

diff --git a/systemtest/src/main/java/io/strimzi/systemtest/Resources.java b/systemtest/src/main/java/io/strimzi/systemtest/Resources.java
index 773efb4613b..185d4812e2a 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/Resources.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/Resources.java
@@ -72,6 +72,7 @@
 import java.net.MalformedURLException;
 import java.net.URL;
+import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.Base64;
 import java.util.Collections;
@@ -1123,7 +1124,7 @@ String clusterCaCertSecretName(String cluster) {
     String saslConfigs(KafkaUser kafkaUser) {
         Secret secret = client().getSecret(kafkaUser.getMetadata().getName());
 
-        String password = new String(Base64.getDecoder().decode(secret.getData().get("password")));
+        String password = new String(Base64.getDecoder().decode(secret.getData().get("password")), Charset.forName("UTF-8"));
         if (password.isEmpty()) {
             LOGGER.info("Secret {}:\n{}", kafkaUser.getMetadata().getName(), toYamlString(secret));
             throw new RuntimeException("The Secret " + kafkaUser.getMetadata().getName() + " lacks the 'password' key");

diff --git a/systemtest/src/main/java/io/strimzi/systemtest/clients/api/VerifiableClient.java b/systemtest/src/main/java/io/strimzi/systemtest/clients/api/VerifiableClient.java
index e72d74476dc..766beb0e1cd 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/clients/api/VerifiableClient.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/clients/api/VerifiableClient.java
@@ -108,6 +108,7 @@ protected void setAllowedArguments(ClientType clientType) {
                 allowedArguments.add(ClientArgument.VALUE_PREFIX);
                 allowedArguments.add(ClientArgument.REPEATING_KEYS);
                 allowedArguments.add(ClientArgument.USER);
+                break;
             case CLI_KAFKA_VERIFIABLE_CONSUMER:
                 allowedArguments.add(ClientArgument.BROKER_LIST);
                 allowedArguments.add(ClientArgument.TOPIC);

diff --git a/systemtest/src/main/java/io/strimzi/systemtest/clients/lib/KafkaClientProperties.java b/systemtest/src/main/java/io/strimzi/systemtest/clients/lib/KafkaClientProperties.java
index 4f39b902403..8a9b0e86654 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/clients/lib/KafkaClientProperties.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/clients/lib/KafkaClientProperties.java
@@ -4,6 +4,7 @@
  */
 package io.strimzi.systemtest.clients.lib;
 
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import io.fabric8.kubernetes.api.model.LoadBalancerIngress;
 import io.fabric8.kubernetes.api.model.Secret;
 import io.fabric8.kubernetes.api.model.Service;
@@ -25,6 +26,7 @@
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.nio.charset.Charset;
 import java.nio.file.Files;
 import java.security.KeyStore;
 import java.security.cert.Certificate;
@@ -38,6 +40,7 @@
 import static io.strimzi.test.BaseITST.kubeClient;
 import static org.junit.jupiter.api.Assertions.fail;
 
+@SuppressFBWarnings("REC_CATCH_EXCEPTION")
 class KafkaClientProperties {
     private static final Logger LOGGER = LogManager.getLogger(KafkaClientProperties.class);
 
@@ -151,7 +154,7 @@ private static Properties sharedClientProperties(String namespace, String cluste
         if (!userName.isEmpty() && securityProtocol.equals(SecurityProtocol.SASL_SSL.name)) {
             properties.setProperty(SaslConfigs.SASL_MECHANISM, "SCRAM-SHA-512");
             Secret userSecret = kubeClient(namespace).getSecret(userName);
-            String password = new String(Base64.getDecoder().decode(userSecret.getData().get("password")));
+            String password = new String(Base64.getDecoder().decode(userSecret.getData().get("password")), Charset.forName("UTF-8"));
 
             String jaasTemplate = "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"%s\" password=\"%s\";";
             String jaasCfg = String.format(jaasTemplate, userName, password);
@@ -239,6 +242,7 @@ private static String getExternalBootstrapConnect(String namespace, String clust
      * @throws IOException
      * @throws InterruptedException
      */
+    @SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_BAD_PRACTICE")
     private static File createKeystore(byte[] ca, byte[] cert, byte[] key, String password) throws IOException, InterruptedException {
         File caFile = File.createTempFile(KafkaClientProperties.class.getName(), ".crt");
         caFile.deleteOnExit();

diff --git a/systemtest/src/main/java/io/strimzi/systemtest/utils/LogCollector.java b/systemtest/src/main/java/io/strimzi/systemtest/utils/LogCollector.java
index 30039bb7bbb..05a095e1420 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/utils/LogCollector.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/utils/LogCollector.java
@@ -4,6 +4,7 @@
  */
 package io.strimzi.systemtest.utils;
 
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import io.strimzi.test.k8s.KubeClient;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@@ -22,6 +23,7 @@ public class LogCollector {
     private File configMapDir;
     private File eventsDir;
 
+    @SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_BAD_PRACTICE")
    public LogCollector(KubeClient kubeClient, File logDir) {
         this.kubeClient = kubeClient;
         this.namespace = kubeClient.getNamespace();

diff --git a/systemtest/src/main/java/io/strimzi/systemtest/utils/StUtils.java b/systemtest/src/main/java/io/strimzi/systemtest/utils/StUtils.java
index 4f02b7373da..7c4720a74e3 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/utils/StUtils.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/utils/StUtils.java
@@ -4,6 +4,7 @@
  */
 package io.strimzi.systemtest.utils;
 
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import io.fabric8.kubernetes.api.model.ContainerStatus;
 import io.fabric8.kubernetes.api.model.LabelSelector;
 import io.fabric8.kubernetes.api.model.LabelSelectorBuilder;
@@ -238,27 +239,49 @@ public static Map waitTillDepConfigHasRolled(String name, int ex
         return depConfigSnapshot(name);
     }
 
-
-    public static File downloadAndUnzip(String url) throws IOException {
-        InputStream bais = (InputStream) URI.create(url).toURL().openConnection().getContent();
-        File dir = Files.createTempDirectory(StUtils.class.getName()).toFile();
-        dir.deleteOnExit();
-        ZipInputStream zin = new ZipInputStream(bais);
-        ZipEntry entry = zin.getNextEntry();
-        byte[] buffer = new byte[8 * 1024];
-        int len;
-        while (entry != null) {
-            File file = new File(dir, entry.getName());
-            if (entry.isDirectory()) {
-                file.mkdirs();
-            } else {
-                FileOutputStream fout = new FileOutputStream(file);
-                while ((len = zin.read(buffer)) != -1) {
-                    fout.write(buffer, 0, len);
+    @SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_BAD_PRACTICE")
+    public static File downloadAndUnzip(String url) {
+        File dir = null;
+        FileOutputStream fout = null;
+        ZipInputStream zin = null;
+        try {
+            InputStream bais = (InputStream) URI.create(url).toURL().openConnection().getContent();
+            dir = Files.createTempDirectory(StUtils.class.getName()).toFile();
+            dir.deleteOnExit();
+            zin = new ZipInputStream(bais);
+            ZipEntry entry = zin.getNextEntry();
+            byte[] buffer = new byte[8 * 1024];
+            int len;
+            while (entry != null) {
+                File file = new File(dir, entry.getName());
+                if (entry.isDirectory()) {
+                    file.mkdirs();
+                } else {
+                    fout = new FileOutputStream(file);
+                    while ((len = zin.read(buffer)) != -1) {
+                        fout.write(buffer, 0, len);
+                    }
+                    fout.close();
+                }
+                entry = zin.getNextEntry();
+            }
+        } catch (IOException e) {
+            LOGGER.error("IOException {}", e.getMessage());
+        } finally {
+            if (fout != null) {
+                try {
+                    fout.close();
+                } catch (IOException e) {
+                    LOGGER.error("IOException {}", e.getMessage());
+                }
+            }
+            if (zin != null) {
+                try {
+                    zin.close();
+                } catch (IOException e) {
+                    e.printStackTrace();
                 }
-                fout.close();
             }
-            entry = zin.getNextEntry();
         }
         return dir;
     }

diff --git a/systemtest/src/test/java/io/strimzi/systemtest/AllNamespaceST.java b/systemtest/src/test/java/io/strimzi/systemtest/AllNamespaceST.java
index 91598f30605..68b0639eb9c 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/AllNamespaceST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/AllNamespaceST.java
@@ -117,12 +117,12 @@ void setupEnvironment(TestInfo testInfo) {
         applyRoleBindings(CO_NAMESPACE);
 
         // Create ClusterRoleBindings that grant cluster-wide access to all OpenShift projects
-        List clusterRoleBindingList = testClassResources.clusterRoleBindingsForAllNamespaces(CO_NAMESPACE);
+        List clusterRoleBindingList = getTestClassResources().clusterRoleBindingsForAllNamespaces(CO_NAMESPACE);
         clusterRoleBindingList.forEach(clusterRoleBinding ->
-            testClassResources.clusterRoleBinding(clusterRoleBinding, CO_NAMESPACE));
+            getTestClassResources().clusterRoleBinding(clusterRoleBinding, CO_NAMESPACE));
 
         LOGGER.info("Deploying CO to watch all namespaces");
-        testClassResources.clusterOperator("*").done();
+        getTestClassResources().clusterOperator("*").done();
 
         String previousNamespace = setNamespace(THIRD_NAMESPACE);
         thirdNamespaceResources = new Resources(kubeClient());
@@ -135,7 +135,7 @@ void setupEnvironment(TestInfo testInfo) {
 
     @Override
     protected void tearDownEnvironmentAfterAll() {
         thirdNamespaceResources.deleteResources();
-        testClassResources.deleteResources();
+        getTestClassResources().deleteResources();
         teardownEnvForOperator();
     }

diff --git a/systemtest/src/test/java/io/strimzi/systemtest/ConnectS2IST.java b/systemtest/src/test/java/io/strimzi/systemtest/ConnectS2IST.java
index 8847542c903..4b960c0f6d1 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/ConnectS2IST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/ConnectS2IST.java
@@ -35,7 +35,7 @@ class ConnectS2IST extends AbstractST {
     @OpenShiftOnly
     @Tag(ACCEPTANCE)
     void testDeployS2IWithMongoDBPlugin() throws IOException {
-        testClassResources.kafkaConnectS2I(CONNECT_CLUSTER_NAME, 1, CLUSTER_NAME)
+        getTestClassResources().kafkaConnectS2I(CONNECT_CLUSTER_NAME, 1, CLUSTER_NAME)
             .editMetadata()
                 .addToLabels("type", "kafka-connect-s2i")
             .endMetadata()
@@ -68,12 +68,12 @@ void setupEnvironment() {
         createTestClassResources();
         applyRoleBindings(NAMESPACE);
         // 050-Deployment
-        testClassResources.clusterOperator(NAMESPACE).done();
+        getTestClassResources().clusterOperator(NAMESPACE).done();
 
         deployTestSpecificResources();
     }
 
     void deployTestSpecificResources() {
-        testClassResources.kafkaEphemeral(CLUSTER_NAME, 3, 1).done();
+        getTestClassResources().kafkaEphemeral(CLUSTER_NAME, 3, 1).done();
     }
 
     @Override

diff --git a/systemtest/src/test/java/io/strimzi/systemtest/ConnectST.java b/systemtest/src/test/java/io/strimzi/systemtest/ConnectST.java
index 3441bd8fff0..1c95b7251c4 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/ConnectST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/ConnectST.java
@@ -257,7 +257,7 @@ void setupEnvironment() {
         applyRoleBindings(NAMESPACE);
         // 050-Deployment
-        testClassResources.clusterOperator(NAMESPACE).done();
+        getTestClassResources().clusterOperator(NAMESPACE).done();
 
         deployTestSpecificResources();
     }
@@ -267,7 +267,7 @@ void deployTestSpecificResources() {
         kafkaConfig.put("transaction.state.log.replication.factor", "3");
         kafkaConfig.put("transaction.state.log.min.isr", "2");
 
-        testClassResources.kafkaEphemeral(KAFKA_CLUSTER_NAME, 3)
+        getTestClassResources().kafkaEphemeral(KAFKA_CLUSTER_NAME, 3)
             .editSpec()
                 .editKafka()
                     .withConfig(kafkaConfig)

diff --git a/systemtest/src/test/java/io/strimzi/systemtest/CustomResourceStatusST.java b/systemtest/src/test/java/io/strimzi/systemtest/CustomResourceStatusST.java
index 20f4158b923..9eb89503471 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/CustomResourceStatusST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/CustomResourceStatusST.java
@@ -32,7 +32,7 @@ class CustomResourceStatusST extends AbstractST {
     @Test
     void testKafkaStatus() throws Exception {
         LOGGER.info("Checking status of deployed kafka cluster");
-        Condition kafkaCondition = testClassResources.kafka().inNamespace(NAMESPACE).withName(CLUSTER_NAME).get().getStatus().getConditions().get(0);
+        Condition kafkaCondition = getTestClassResources().kafka().inNamespace(NAMESPACE).withName(CLUSTER_NAME).get().getStatus().getConditions().get(0);
         logCurrentStatus(kafkaCondition);
         assertThat("Kafka cluster is in wrong state!", kafkaCondition.getType(), is("Ready"));
         LOGGER.info("Kafka cluster is in desired state: Ready");
@@ -89,11 +89,11 @@ void createTestEnvironment() {
         createTestClassResources();
         applyRoleBindings(NAMESPACE);
         // 050-Deployment
-        testClassResources.clusterOperator(NAMESPACE, Long.toString(Constants.CO_OPERATION_TIMEOUT)).done();
+        getTestClassResources().clusterOperator(NAMESPACE, Long.toString(Constants.CO_OPERATION_TIMEOUT)).done();
 
-        operationID = startDeploymentMeasuring();
+        setOperationID(startDeploymentMeasuring());
 
-        testClassResources.kafkaEphemeral(CLUSTER_NAME, 3, 1)
+        getTestClassResources().kafkaEphemeral(CLUSTER_NAME, 3, 1)
             .editSpec()
                 .editKafka()
                     .editListeners()
             .endSpec()
             .done();
 
-        testClassResources.topic(CLUSTER_NAME, TOPIC_NAME);
+        getTestClassResources().topic(CLUSTER_NAME, TOPIC_NAME);
     }
 
     void logCurrentStatus(Condition kafkaCondition) {
@@ -116,7 +116,7 @@ void logCurrentStatus(Condition kafkaCondition) {
     void waitForKafkaStatus(String status) {
         TestUtils.waitFor("Wait until status is false", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> {
-            Condition kafkaCondition = testClassResources().kafka().inNamespace(NAMESPACE).withName(CLUSTER_NAME).get().getStatus().getConditions().get(0);
+            Condition kafkaCondition = getTestClassResources().kafka().inNamespace(NAMESPACE).withName(CLUSTER_NAME).get().getStatus().getConditions().get(0);
             logCurrentStatus(kafkaCondition);
             return kafkaCondition.getType().equals(status);
         });

diff --git a/systemtest/src/test/java/io/strimzi/systemtest/KafkaST.java b/systemtest/src/test/java/io/strimzi/systemtest/KafkaST.java
index 5c80057ec73..0d69217a45e 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/KafkaST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/KafkaST.java
@@ -123,7 +123,7 @@ void testDeployKafkaClusterViaTemplate() {
     @Test
     @Tag(ACCEPTANCE)
     void testKafkaAndZookeeperScaleUpScaleDown() throws Exception {
-        operationID = startTimeMeasuring(Operation.SCALE_UP);
+        setOperationID(startTimeMeasuring(Operation.SCALE_UP));
         testMethodResources().kafkaEphemeral(CLUSTER_NAME, 3)
             .editSpec()
                 .editKafka()
@@ -163,14 +163,14 @@ void testKafkaAndZookeeperScaleUpScaleDown() throws Exception {
         assertThat(events, hasAllOfReasons(Scheduled, Pulled, Created, Started));
         waitForClusterAvailability(NAMESPACE, firstTopicName);
         //Test that CO doesn't have any exceptions in log
-        TimeMeasuringSystem.stopOperation(operationID);
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        TimeMeasuringSystem.stopOperation(getOperationID());
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
 
         // scale down
         LOGGER.info("Scaling down");
         // Get kafka new pod uid before deletion
         uid = kubeClient().getPodUid(newPodName);
-        operationID = startTimeMeasuring(Operation.SCALE_DOWN);
+        setOperationID(startTimeMeasuring(Operation.SCALE_DOWN));
         replaceKafkaResource(CLUSTER_NAME, k -> k.getSpec().getKafka().setReplicas(initialReplicas));
         StUtils.waitTillSsHasRolled(kafkaSsName, initialReplicas, kafkaPods);
@@ -183,8 +183,8 @@
         uid = kubeClient().getStatefulSetUid(kafkaClusterName(CLUSTER_NAME));
         assertThat(getEvents(uid), hasAllOfReasons(SuccessfulDelete));
         //Test that CO doesn't have any exceptions in log
-        TimeMeasuringSystem.stopOperation(operationID);
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        TimeMeasuringSystem.stopOperation(getOperationID());
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
 
         String secondTopicName = "test-topic-2";
         testMethodResources().topic(CLUSTER_NAME, secondTopicName, finalReplicas, finalReplicas).done();
@@ -215,7 +215,7 @@ void testEODeletion() {
 
     @Test
     void testZookeeperScaleUpScaleDown() {
-        operationID = startTimeMeasuring(Operation.SCALE_UP);
+        setOperationID(startTimeMeasuring(Operation.SCALE_UP));
         testMethodResources().kafkaEphemeral(CLUSTER_NAME, 3).done();
         // kafka cluster already deployed
         LOGGER.info("Running zookeeperScaleUpScaleDown with cluster {}", CLUSTER_NAME);
@@ -238,14 +238,14 @@ void testZookeeperScaleUpScaleDown() {
         checkZkPodsLog(newZkPodNames);
 
         //Test that CO doesn't have any exceptions in log
-        TimeMeasuringSystem.stopOperation(operationID);
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        TimeMeasuringSystem.stopOperation(getOperationID());
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
 
         // scale down
         LOGGER.info("Scaling down");
         // Get zk-3 uid before deletion
         String uid = kubeClient().getPodUid(newZkPodNames.get(3));
-        operationID = startTimeMeasuring(Operation.SCALE_DOWN);
+        setOperationID(startTimeMeasuring(Operation.SCALE_DOWN));
         replaceKafkaResource(CLUSTER_NAME, k -> k.getSpec().getZookeeper().setReplicas(initialZkReplicas));
 
         for (String name : newZkPodNames) {
@@ -261,9 +261,9 @@ void testZookeeperScaleUpScaleDown() {
         uid = kubeClient().getStatefulSetUid(zookeeperClusterName(CLUSTER_NAME));
         assertThat(getEvents(uid), hasAllOfReasons(SuccessfulDelete));
         // Stop measuring
-        TimeMeasuringSystem.stopOperation(operationID);
+        TimeMeasuringSystem.stopOperation(getOperationID());
         //Test that CO doesn't have any exceptions in log
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
     }
 
     @Test
@@ -674,7 +674,7 @@ void testJvmAndResources() {
             .endEntityOperator()
             .endSpec().done();
 
-        operationID = startTimeMeasuring(Operation.NEXT_RECONCILIATION);
+        setOperationID(startTimeMeasuring(Operation.NEXT_RECONCILIATION));
 
         // Make snapshots for Kafka cluster to meke sure that there is no rolling update after CO reconciliation
         String zkSsName = KafkaResources.zookeeperStatefulSetName(CLUSTER_NAME);
@@ -705,14 +705,14 @@
             "512M", "300m", "256M", "300m");
 
         TestUtils.waitFor("Wait till reconciliation timeout", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT,
-            () -> !cmdKubeClient().searchInLog("deploy", "strimzi-cluster-operator", TimeMeasuringSystem.getCurrentDuration(testClass, testName, operationID), "\"Assembly reconciled\"").isEmpty());
+            () -> !cmdKubeClient().searchInLog("deploy", "strimzi-cluster-operator", TimeMeasuringSystem.getCurrentDuration(testClass, testName, getOperationID()), "\"Assembly reconciled\"").isEmpty());
 
         // Checking no rolling update after last CO reconciliation
         LOGGER.info("Checking no rolling update for Kafka cluster");
         assertFalse(StUtils.ssHasRolled(zkSsName, zkPods));
         assertFalse(StUtils.ssHasRolled(kafkaSsName, kafkaPods));
         assertFalse(StUtils.depHasRolled(eoDepName, eoPods));
-        TimeMeasuringSystem.stopOperation(operationID);
+        TimeMeasuringSystem.stopOperation(getOperationID());
     }
 
     @Test
@@ -808,7 +808,7 @@ void testRemoveTopicOperatorFromEntityOperator() {
     @Test
     void testRemoveUserOperatorFromEntityOperator() {
         LOGGER.info("Deploying Kafka cluster {}", CLUSTER_NAME);
-        operationID = startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT);
+        setOperationID(startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT));
         testMethodResources().kafkaEphemeral(CLUSTER_NAME, 3).done();
         String eoPodName = kubeClient().listPodsByPrefixInName(entityOperatorDeploymentName(CLUSTER_NAME))
                 .get(0).getMetadata().getName();
@@ -845,14 +845,14 @@ void testRemoveUserOperatorFromEntityOperator() {
             });
         });
 
-        TimeMeasuringSystem.stopOperation(operationID);
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        TimeMeasuringSystem.stopOperation(getOperationID());
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
     }
 
     @Test
     void testRemoveUserAndTopicOperatorsFromEntityOperator() {
         LOGGER.info("Deploying Kafka cluster {}", CLUSTER_NAME);
-        operationID = startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT);
+        setOperationID(startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT));
         testMethodResources().kafkaEphemeral(CLUSTER_NAME, 3).done();
 
         String eoPodName = kubeClient().listPodsByPrefixInName(entityOperatorDeploymentName(CLUSTER_NAME))
@@ -890,14 +890,14 @@ void testRemoveUserAndTopicOperatorsFromEntityOperator() {
             });
         });
 
-        TimeMeasuringSystem.stopOperation(operationID);
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        TimeMeasuringSystem.stopOperation(getOperationID());
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
     }
 
     @Test
     void testEntityOperatorWithoutTopicOperator() {
         LOGGER.info("Deploying Kafka cluster without TO in EO");
-        operationID = startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT);
+        setOperationID(startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT));
         testMethodResources().kafkaEphemeral(CLUSTER_NAME, 3)
             .editSpec()
                 .withNewEntityOperator()
             .endSpec()
             .done();
 
-        TimeMeasuringSystem.stopOperation(operationID);
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        TimeMeasuringSystem.stopOperation(getOperationID());
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
 
         //Checking that TO was not deployed
         kubeClient().listPodsByPrefixInName(entityOperatorDeploymentName(CLUSTER_NAME)).forEach(pod -> {
@@ -921,7 +921,7 @@ void testEntityOperatorWithoutTopicOperator() {
     @Test
     void testEntityOperatorWithoutUserOperator() {
         LOGGER.info("Deploying Kafka cluster without UO in EO");
-        operationID = startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT);
+        setOperationID(startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT));
         testMethodResources().kafkaEphemeral(CLUSTER_NAME, 3)
             .editSpec()
                 .withNewEntityOperator()
             .endSpec()
             .done();
 
-        TimeMeasuringSystem.stopOperation(operationID);
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        TimeMeasuringSystem.stopOperation(getOperationID());
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
 
         //Checking that UO was not deployed
         kubeClient().listPodsByPrefixInName(entityOperatorDeploymentName(CLUSTER_NAME)).forEach(pod -> {
@@ -945,7 +945,7 @@ void testEntityOperatorWithoutUserOperator() {
     @Test
     void testEntityOperatorWithoutUserAndTopicOperators() {
         LOGGER.info("Deploying Kafka cluster without UO and TO in EO");
-        operationID = startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT);
+        setOperationID(startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT));
         testMethodResources().kafkaEphemeral(CLUSTER_NAME, 3)
             .editSpec()
                 .withNewEntityOperator()
             .endSpec()
             .done();
 
-        TimeMeasuringSystem.stopOperation(operationID);
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        TimeMeasuringSystem.stopOperation(getOperationID());
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
 
         //Checking that EO was not deployed
         assertEquals(0, kubeClient().listPodsByPrefixInName(entityOperatorDeploymentName(CLUSTER_NAME)).size(), "EO should not be deployed");
@@ -974,7 +974,7 @@ void testTopicWithoutLabels() {
         // Checking that resource was created
         assertThat(cmdKubeClient().list("kafkatopic"), hasItems("topic-without-labels"));
 
-        // Checking that TO didn't handl new topic and zk pods don't contain new topic
+        // Checking that TO didn't handle new topic and zk pods don't contain new topic
         assertThat(listTopicsUsingPodCLI(CLUSTER_NAME, 0), not(hasItems("topic-without-labels")));
 
         // Checking TO logs
@@ -1073,7 +1073,7 @@ void testManualTriggeringRollingUpdate() {
         testMethodResources().kafkaEphemeral(CLUSTER_NAME, 1).done();
 
         // rolling update for kafka
-        operationID = startTimeMeasuring(Operation.ROLLING_UPDATE);
+        setOperationID(startTimeMeasuring(Operation.ROLLING_UPDATE));
         // set annotation to trigger Kafka rolling update
         kubeClient().statefulSet(kafkaClusterName(CLUSTER_NAME)).cascading(false).edit()
             .editMetadata()
@@ -1094,7 +1094,7 @@ void testManualTriggeringRollingUpdate() {
         assertThat(coLog, containsString("Rolling Kafka pod " + kafkaClusterName(CLUSTER_NAME) + "-0" + " due to manual rolling update"));
 
         // rolling update for zookeeper
-        operationID = startTimeMeasuring(Operation.ROLLING_UPDATE);
+        setOperationID(startTimeMeasuring(Operation.ROLLING_UPDATE));
         // set annotation to trigger Zookeeper rolling update
         kubeClient().statefulSet(zookeeperClusterName(CLUSTER_NAME)).cascading(false).edit()
             .editMetadata()
@@ -1384,7 +1384,7 @@ void setupEnvironment() {
         createTestClassResources();
         applyRoleBindings(NAMESPACE);
         // 050-Deployment
-        testClassResources.clusterOperator(NAMESPACE).done();
+        getTestClassResources().clusterOperator(NAMESPACE).done();
     }
 
     @Override

diff --git a/systemtest/src/test/java/io/strimzi/systemtest/LogSettingST.java b/systemtest/src/test/java/io/strimzi/systemtest/LogSettingST.java
index 42be9acbef3..1cb926210ff 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/LogSettingST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/LogSettingST.java
@@ -100,42 +100,42 @@ class LogSettingST extends AbstractST {
     @Test
     @Order(1)
     void testLoggersKafka() {
-        int duration = TimeMeasuringSystem.getCurrentDuration(testClass, testClass, operationID);
+        int duration = TimeMeasuringSystem.getCurrentDuration(testClass, testClass, getOperationID());
         assertTrue(checkLoggersLevel(KAFKA_LOGGERS, duration, KAFKA_MAP), "Kafka's log level is set properly");
     }
 
     @Test
     @Order(2)
     void testLoggersZookeeper() {
-        int duration = TimeMeasuringSystem.getCurrentDuration(testClass, testClass, operationID);
+        int duration = TimeMeasuringSystem.getCurrentDuration(testClass, testClass, getOperationID());
         assertTrue(checkLoggersLevel(ZOOKEEPER_LOGGERS, duration, ZOOKEEPER_MAP), "Zookeeper's log level is set properly");
     }
 
     @Test
     @Order(3)
     void testLoggersTO() {
-        int duration = TimeMeasuringSystem.getCurrentDuration(testClass, testClass, operationID);
+        int duration = TimeMeasuringSystem.getCurrentDuration(testClass, testClass, getOperationID());
         assertTrue(checkLoggersLevel(OPERATORS_LOGGERS, duration, TO_MAP), "Topic operator's log level is set properly");
     }
 
     @Test
     @Order(4)
     void testLoggersUO() {
-        int duration = TimeMeasuringSystem.getCurrentDuration(testClass, testClass, operationID);
+        int duration = TimeMeasuringSystem.getCurrentDuration(testClass, testClass, getOperationID());
         assertTrue(checkLoggersLevel(OPERATORS_LOGGERS, duration, UO_MAP), "User operator's log level is set properly");
     }
 
     @Test
     @Order(5)
     void testLoggersKafkaConnect() {
-        int duration = TimeMeasuringSystem.getCurrentDuration(testClass, testClass, operationID);
+        int duration = TimeMeasuringSystem.getCurrentDuration(testClass, testClass, getOperationID());
         assertTrue(checkLoggersLevel(CONNECT_LOGGERS, duration, CONNECT_MAP), "Kafka connect's log level is set properly");
     }
 
     @Test
     @Order(6)
     void testLoggersMirrorMaker() {
-        int duration = TimeMeasuringSystem.getCurrentDuration(testClass, testClass, operationID);
+        int duration = TimeMeasuringSystem.getCurrentDuration(testClass, testClass, getOperationID());
         assertTrue(checkLoggersLevel(MIRROR_MAKER_LOGGERS, duration, MM_MAP), "Mirror maker's log level is set properly");
     }
@@ -267,11 +267,11 @@ void createClassResources() {
         createTestClassResources();
         applyRoleBindings(NAMESPACE);
         // 050-Deployment
-        testClassResources.clusterOperator(NAMESPACE).done();
+        getTestClassResources().clusterOperator(NAMESPACE).done();
 
-        operationID = startDeploymentMeasuring();
+        setOperationID(startDeploymentMeasuring());
 
-        testClassResources.kafkaEphemeral(CLUSTER_NAME, 3, 1)
+        getTestClassResources().kafkaEphemeral(CLUSTER_NAME, 3, 1)
             .editSpec()
                 .editKafka()
                     .withNewInlineLogging()
             .endSpec()
             .done();
 
-        testClassResources.kafkaEphemeral(GC_LOGGING_SET_NAME, 3, 1)
+        getTestClassResources().kafkaEphemeral(GC_LOGGING_SET_NAME, 3, 1)
             .editSpec()
                 .editKafka()
                     .withNewJvmOptions()
             .endSpec()
             .done();
 
-        testClassResources.kafkaConnect(CLUSTER_NAME, 1)
+        getTestClassResources().kafkaConnect(CLUSTER_NAME, 1)
             .editSpec()
                 .withNewInlineLogging()
                     .withLoggers(CONNECT_LOGGERS)
                 .endJvmOptions()
             .endSpec().done();
 
-        testClassResources.kafkaMirrorMaker(CLUSTER_NAME, CLUSTER_NAME, GC_LOGGING_SET_NAME, "my-group", 1, false)
+        getTestClassResources().kafkaMirrorMaker(CLUSTER_NAME, CLUSTER_NAME, GC_LOGGING_SET_NAME, "my-group", 1, false)
             .editSpec()
                 .withNewInlineLogging()
                     .withLoggers(MIRROR_MAKER_LOGGERS)

diff --git a/systemtest/src/test/java/io/strimzi/systemtest/MirrorMakerST.java b/systemtest/src/test/java/io/strimzi/systemtest/MirrorMakerST.java
index e291de1d188..09ab0af55bd 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/MirrorMakerST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/MirrorMakerST.java
@@ -42,7 +42,7 @@ public class MirrorMakerST extends MessagingBaseST {
     void testMirrorMaker() throws Exception {
         Map jvmOptionsXX = new HashMap<>();
         jvmOptionsXX.put("UseG1GC", "true");
-        operationID = startTimeMeasuring(Operation.MM_DEPLOYMENT);
+        setOperationID(startTimeMeasuring(Operation.MM_DEPLOYMENT));
         String topicSourceName = TOPIC_NAME + "-source" + "-" + rng.nextInt(Integer.MAX_VALUE);
         String kafkaSourceName = CLUSTER_NAME + "-source";
         String kafkaTargetName = CLUSTER_NAME + "-target";
@@ -89,7 +89,7 @@ void testMirrorMaker() throws Exception {
         assertExpectedJavaOpts(podName, kafkaMirrorMakerName(CLUSTER_NAME), "-Xmx200m", "-Xms200m", "-server", "-XX:+UseG1GC");
-        TimeMeasuringSystem.stopOperation(operationID);
+        TimeMeasuringSystem.stopOperation(getOperationID());
 
         int sent = sendMessages(messagesCount, Constants.TIMEOUT_SEND_MESSAGES, kafkaSourceName, false, topicSourceName, null);
         int receivedSource = receiveMessages(messagesCount, Constants.TIMEOUT_RECV_MESSAGES, kafkaSourceName, false, topicSourceName, null);
@@ -105,7 +105,7 @@ void testMirrorMaker() throws Exception {
     @Test
     @Tag(ACCEPTANCE)
     void testMirrorMakerTlsAuthenticated() throws Exception {
-        operationID = startTimeMeasuring(Operation.MM_DEPLOYMENT);
+        setOperationID(startTimeMeasuring(Operation.MM_DEPLOYMENT));
         String topicSourceName = TOPIC_NAME + "-source" + "-" + rng.nextInt(Integer.MAX_VALUE);
         String kafkaSourceUserName = "my-user-source";
         String kafkaUserTargetName = "my-user-target";
@@ -182,7 +182,7 @@ void testMirrorMakerTlsAuthenticated() throws Exception {
             .endSpec()
             .done();
 
-        TimeMeasuringSystem.stopOperation(operationID);
+        TimeMeasuringSystem.stopOperation(getOperationID());
 
         int sent = sendMessages(messagesCount, Constants.TIMEOUT_SEND_MESSAGES, kafkaClusterSourceName, true, topicSourceName, userSource);
         int receivedSource = receiveMessages(messagesCount, Constants.TIMEOUT_RECV_MESSAGES, kafkaClusterSourceName, true, topicSourceName, userSource);
@@ -197,7 +197,7 @@
      */
     @Test
     void testMirrorMakerTlsScramSha() throws Exception {
-        operationID = startTimeMeasuring(Operation.MM_DEPLOYMENT);
+        setOperationID(startTimeMeasuring(Operation.MM_DEPLOYMENT));
         String kafkaUserSource = "my-user-source";
         String kafkaUserTarget = "my-user-target";
         String kafkaSourceName = CLUSTER_NAME + "-source";
@@ -285,7 +285,7 @@ void testMirrorMakerTlsScramSha() throws Exception {
         // Deploy topic
         testMethodResources().topic(kafkaSourceName, topicName).done();
 
-        TimeMeasuringSystem.stopOperation(operationID);
+        TimeMeasuringSystem.stopOperation(getOperationID());
 
         int sent = sendMessages(messagesCount, Constants.TIMEOUT_SEND_MESSAGES, kafkaSourceName, true, topicName, userSource);
         int receivedSource = receiveMessages(messagesCount, Constants.TIMEOUT_RECV_MESSAGES, kafkaSourceName, true, topicName, userSource);
@@ -316,12 +316,12 @@ void setupEnvironment() {
         createTestClassResources();
         applyRoleBindings(NAMESPACE);
         // 050-Deployment
-        testClassResources.clusterOperator(NAMESPACE).done();
+        getTestClassResources().clusterOperator(NAMESPACE).done();
     }
 
     @AfterAll
     void teardownEnvironment() {
-        testClassResources.deleteResources();
+        getTestClassResources().deleteResources();
         teardownEnvForOperator();
     }

diff --git a/systemtest/src/test/java/io/strimzi/systemtest/MultipleNamespaceST.java b/systemtest/src/test/java/io/strimzi/systemtest/MultipleNamespaceST.java
index 6677c121666..7d5b0563c65 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/MultipleNamespaceST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/MultipleNamespaceST.java
@@ -65,13 +65,13 @@ void setupEnvironment() {
         applyRoleBindings(CO_NAMESPACE, SECOND_NAMESPACE);
 
         LOGGER.info("Deploying CO to watch multiple namespaces");
-        testClassResources.clusterOperator(String.join(",", CO_NAMESPACE, SECOND_NAMESPACE)).done();
+        getTestClassResources().clusterOperator(String.join(",", CO_NAMESPACE, SECOND_NAMESPACE)).done();
 
         deployTestSpecificResources();
     }
 
     private void deployTestSpecificResources() {
-        testClassResources.kafkaEphemeral(CLUSTER_NAME, 3)
+        getTestClassResources().kafkaEphemeral(CLUSTER_NAME, 3)
             .editSpec()
                 .editEntityOperator()
                     .editTopicOperator()

diff --git a/systemtest/src/test/java/io/strimzi/systemtest/RecoveryST.java b/systemtest/src/test/java/io/strimzi/systemtest/RecoveryST.java
index 5305b27c893..1a8e2882231 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/RecoveryST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/RecoveryST.java
@@ -31,7 +31,7 @@ class RecoveryST extends AbstractST {
 
     @Test
     void testRecoveryFromEntityOperatorDeletion() {
-        operationID = startTimeMeasuring(Operation.CLUSTER_RECOVERY);
+        setOperationID(startTimeMeasuring(Operation.CLUSTER_RECOVERY));
         // kafka cluster already deployed
         LOGGER.info("Running testRecoveryFromEntityOperatorDeletion with cluster {}", CLUSTER_NAME);
         String entityOperatorDeploymentName = entityOperatorDeploymentName(CLUSTER_NAME);
@@ -42,15 +42,15 @@ void testRecoveryFromEntityOperatorDeletion() {
         StUtils.waitForDeploymentRecovery(entityOperatorDeploymentName, entityOperatorDeploymentUid);
         StUtils.waitForDeploymentReady(entityOperatorDeploymentName, 1);
 
-        TimeMeasuringSystem.stopOperation(operationID);
+        TimeMeasuringSystem.stopOperation(getOperationID());
         //Test that CO doesn't have any exceptions in log
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
     }
 
     @Test
     @Tag(ACCEPTANCE)
     void testRecoveryFromKafkaStatefulSetDeletion() {
-        operationID = startTimeMeasuring(Operation.CLUSTER_RECOVERY);
+        setOperationID(startTimeMeasuring(Operation.CLUSTER_RECOVERY));
         // kafka cluster already deployed
         LOGGER.info("Running deleteKafkaStatefulSet with cluster {}", CLUSTER_NAME);
         String kafkaStatefulSetName = kafkaClusterName(CLUSTER_NAME);
@@ -61,15 +61,15 @@ void testRecoveryFromKafkaStatefulSetDeletion() {
         StUtils.waitForStatefulSetRecovery(kafkaStatefulSetName, kafkaStatefulSetUid);
         StUtils.waitForAllStatefulSetPodsReady(kafkaStatefulSetName, 3);
 
-        TimeMeasuringSystem.stopOperation(operationID);
+        TimeMeasuringSystem.stopOperation(getOperationID());
         //Test that CO doesn't have any exceptions in log
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
     }
 
     @Test
     @Tag(ACCEPTANCE)
     void testRecoveryFromZookeeperStatefulSetDeletion() {
-        operationID = startTimeMeasuring(Operation.CLUSTER_RECOVERY);
+        setOperationID(startTimeMeasuring(Operation.CLUSTER_RECOVERY));
         // kafka cluster already deployed
         LOGGER.info("Running deleteZookeeperStatefulSet with cluster {}", CLUSTER_NAME);
         String zookeeperStatefulSetName = zookeeperClusterName(CLUSTER_NAME);
@@ -80,14 +80,14 @@ void testRecoveryFromZookeeperStatefulSetDeletion() {
         StUtils.waitForStatefulSetRecovery(zookeeperStatefulSetName, zookeeperStatefulSetUid);
         StUtils.waitForAllStatefulSetPodsReady(zookeeperStatefulSetName, 1);
 
-        TimeMeasuringSystem.stopOperation(operationID);
+        TimeMeasuringSystem.stopOperation(getOperationID());
         //Test that CO doesn't have any exceptions in log
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
     }
 
     @Test
     void testRecoveryFromKafkaServiceDeletion() {
-        operationID = startTimeMeasuring(Operation.CLUSTER_RECOVERY);
+        setOperationID(startTimeMeasuring(Operation.CLUSTER_RECOVERY));
         // kafka cluster already deployed
         LOGGER.info("Running deleteKafkaService with cluster {}", CLUSTER_NAME);
         String kafkaServiceName = kafkaServiceName(CLUSTER_NAME);
@@ -97,14 +97,14 @@ void testRecoveryFromKafkaServiceDeletion() {
         LOGGER.info("Waiting for creation {}", kafkaServiceName);
         StUtils.waitForServiceRecovery(kafkaServiceName, kafkaServiceUid);
 
-        TimeMeasuringSystem.stopOperation(operationID);
+        TimeMeasuringSystem.stopOperation(getOperationID());
         //Test that CO doesn't have any exceptions in log
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
     }
 
     @Test
     void testRecoveryFromZookeeperServiceDeletion() {
-        operationID = startTimeMeasuring(Operation.CLUSTER_RECOVERY);
+        setOperationID(startTimeMeasuring(Operation.CLUSTER_RECOVERY));
         // kafka cluster already deployed
         LOGGER.info("Running deleteKafkaService with cluster {}", CLUSTER_NAME);
         String zookeeperServiceName = zookeeperServiceName(CLUSTER_NAME);
@@ -114,14 +114,14 @@ void testRecoveryFromZookeeperServiceDeletion() {
         LOGGER.info("Waiting for creation {}", zookeeperServiceName);
         StUtils.waitForServiceRecovery(zookeeperServiceName, zookeeperServiceUid);
 
-        TimeMeasuringSystem.stopOperation(operationID);
+        TimeMeasuringSystem.stopOperation(getOperationID());
         //Test that CO doesn't have any exceptions in log
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
     }
 
     @Test
     void testRecoveryFromKafkaHeadlessServiceDeletion() {
-        operationID = startTimeMeasuring(Operation.CLUSTER_RECOVERY);
+        setOperationID(startTimeMeasuring(Operation.CLUSTER_RECOVERY));
         // kafka cluster already deployed
         LOGGER.info("Running deleteKafkaHeadlessService with cluster {}", CLUSTER_NAME);
         String kafkaHeadlessServiceName = kafkaHeadlessServiceName(CLUSTER_NAME);
@@ -131,14 +131,14 @@ void testRecoveryFromKafkaHeadlessServiceDeletion() {
         LOGGER.info("Waiting for creation {}", kafkaHeadlessServiceName);
         StUtils.waitForServiceRecovery(kafkaHeadlessServiceName, kafkaHeadlessServiceUid);
 
-        TimeMeasuringSystem.stopOperation(operationID);
+        TimeMeasuringSystem.stopOperation(getOperationID());
         //Test that CO doesn't have any exceptions in log
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
     }
 
     @Test
     void testRecoveryFromZookeeperHeadlessServiceDeletion() {
-        operationID = startTimeMeasuring(Operation.CLUSTER_RECOVERY);
+        setOperationID(startTimeMeasuring(Operation.CLUSTER_RECOVERY));
         // kafka cluster already deployed
         LOGGER.info("Running deleteKafkaHeadlessService with cluster {}", CLUSTER_NAME);
         String zookeeperHeadlessServiceName = zookeeperHeadlessServiceName(CLUSTER_NAME);
@@ -148,14 +148,14 @@ void testRecoveryFromZookeeperHeadlessServiceDeletion() {
         LOGGER.info("Waiting for creation {}", zookeeperHeadlessServiceName);
         StUtils.waitForServiceRecovery(zookeeperHeadlessServiceName, zookeeperHeadlessServiceUid);
 
-        TimeMeasuringSystem.stopOperation(operationID);
+        TimeMeasuringSystem.stopOperation(getOperationID());
         //Test that CO doesn't have any exceptions in log
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
     }
 
     @Test
     void testRecoveryFromKafkaMetricsConfigDeletion() {
-        operationID = startTimeMeasuring(Operation.CLUSTER_RECOVERY);
+        setOperationID(startTimeMeasuring(Operation.CLUSTER_RECOVERY));
         // kafka cluster already deployed
         LOGGER.info("Running deleteKafkaMetricsConfig with cluster {}", CLUSTER_NAME);
         String kafkaMetricsConfigName = kafkaMetricsConfigName(CLUSTER_NAME);
@@ -165,14 +165,14 @@ void testRecoveryFromKafkaMetricsConfigDeletion() {
         LOGGER.info("Waiting for creation {}", kafkaMetricsConfigName);
         StUtils.waitForConfigMapRecovery(kafkaMetricsConfigName, kafkaMetricsConfigUid);
 
-        TimeMeasuringSystem.stopOperation(operationID);
+        TimeMeasuringSystem.stopOperation(getOperationID());
         //Test that CO doesn't have any exceptions in log
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
     }
 
     @Test
     void testRecoveryFromZookeeperMetricsConfigDeletion() {
-        operationID = startTimeMeasuring(Operation.CLUSTER_RECOVERY);
+        setOperationID(startTimeMeasuring(Operation.CLUSTER_RECOVERY));
         LOGGER.info("Running deleteZookeeperMetricsConfig with cluster {}", CLUSTER_NAME);
         // kafka cluster already deployed
         String zookeeperMetricsConfigName = zookeeperMetricsConfigName(CLUSTER_NAME);
@@ -182,15 +182,15 @@ void testRecoveryFromZookeeperMetricsConfigDeletion() {
         LOGGER.info("Waiting for creation {}", zookeeperMetricsConfigName);
         StUtils.waitForConfigMapRecovery(zookeeperMetricsConfigName, zookeeperMetricsConfigUid);
 
-        TimeMeasuringSystem.stopOperation(operationID);
+        TimeMeasuringSystem.stopOperation(getOperationID());
         //Test that CO doesn't have any exceptions in log
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
     }
 
     @Test
     @Tag(BRIDGE)
     void testRecoveryFromKafkaBridgeDeploymentDeletion() {
-        operationID = startTimeMeasuring(Operation.CLUSTER_RECOVERY);
+        setOperationID(startTimeMeasuring(Operation.CLUSTER_RECOVERY));
         LOGGER.info("Running deleteKafkaBridgeDeployment with cluster {}", CLUSTER_NAME);
         // kafka cluster already deployed
         String kafkaBridgeDeploymentName = KafkaBridgeResources.deploymentName(CLUSTER_NAME);
@@ -200,15 +200,15 @@ void testRecoveryFromKafkaBridgeDeploymentDeletion() {
         LOGGER.info("Waiting for deployment {} recovery", kafkaBridgeDeploymentName);
         StUtils.waitForDeploymentRecovery(kafkaBridgeDeploymentName, kafkaBridgeDeploymentUid);
 
-        TimeMeasuringSystem.stopOperation(operationID);
+        TimeMeasuringSystem.stopOperation(getOperationID());
         //Test that CO doesn't have any exceptions in log
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
     }
 
     @Test
     @Tag(BRIDGE)
     void testRecoveryFromKafkaBridgeServiceDeletion() {
-        operationID = startTimeMeasuring(Operation.CLUSTER_RECOVERY);
+        setOperationID(startTimeMeasuring(Operation.CLUSTER_RECOVERY));
         LOGGER.info("Running deleteKafkaBridgeService with cluster {}", CLUSTER_NAME);
         String kafkaBridgeServiceName = KafkaBridgeResources.serviceName(CLUSTER_NAME);
         String kafkaBridgeServiceUid = kubeClient().namespace(NAMESPACE).getServiceUid(kafkaBridgeServiceName);
@@ -217,15 +217,15 @@ void testRecoveryFromKafkaBridgeServiceDeletion() {
         LOGGER.info("Waiting for service {} recovery", kafkaBridgeServiceName);
         StUtils.waitForServiceRecovery(kafkaBridgeServiceName, kafkaBridgeServiceUid);
 
-        TimeMeasuringSystem.stopOperation(operationID);
+        TimeMeasuringSystem.stopOperation(getOperationID());
         //Test that CO doesn't have any exceptions in log
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
     }
 
     @Test
     @Tag(BRIDGE)
     void testRecoveryFromKafkaBridgeMetricsConfigDeletion() {
-        operationID = startTimeMeasuring(Operation.CLUSTER_RECOVERY);
+        setOperationID(startTimeMeasuring(Operation.CLUSTER_RECOVERY));
         LOGGER.info("Running deleteKafkaBridgeMetricsConfig with cluster {}", CLUSTER_NAME);
         String kafkaBridgeMetricsConfigName = KafkaBridgeResources.metricsAndLogConfigMapName(CLUSTER_NAME);
         String kafkaBridgeMetricsConfigUid = kubeClient().getConfigMapUid(kafkaBridgeMetricsConfigName);
@@ -234,9 +234,9 @@ void testRecoveryFromKafkaBridgeMetricsConfigDeletion() {
         LOGGER.info("Waiting for metric config {} re-creation", kafkaBridgeMetricsConfigName);
         StUtils.waitForConfigMapRecovery(kafkaBridgeMetricsConfigName, kafkaBridgeMetricsConfigUid);
 
-        TimeMeasuringSystem.stopOperation(operationID);
+        TimeMeasuringSystem.stopOperation(getOperationID());
         //Test that CO doesn't have any exceptions in log
-        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, operationID));
+        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
     }
 
     @BeforeAll
@@ -247,14 +247,14 @@ void setupEnvironment() {
         createTestClassResources();
         applyRoleBindings(NAMESPACE);
         // 050-Deployment
-        testClassResources.clusterOperator(NAMESPACE).done();
+        getTestClassResources().clusterOperator(NAMESPACE).done();
 
         deployTestSpecificResources();
     }
 
     void deployTestSpecificResources() {
-        testClassResources.kafkaEphemeral(CLUSTER_NAME, 3, 1).done();
-        testClassResources.kafkaBridge(CLUSTER_NAME, KafkaResources.plainBootstrapAddress(CLUSTER_NAME), 1, Constants.HTTP_BRIDGE_DEFAULT_PORT).done();
+        getTestClassResources().kafkaEphemeral(CLUSTER_NAME, 3, 1).done();
+        getTestClassResources().kafkaBridge(CLUSTER_NAME, KafkaResources.plainBootstrapAddress(CLUSTER_NAME), 1, Constants.HTTP_BRIDGE_DEFAULT_PORT).done();
     }
 
     @Override
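The systemtest hunks above swap direct access to the operationID and testClassResources fields for accessor calls; the matching change to AbstractST.java appears elsewhere in this patch (see the diffstat). A hypothetical sketch of the encapsulation those calls assume, with the field simply made private behind a getter and setter (the class body here is illustrative, not the real AbstractST):

    // Hypothetical sketch: the shape of encapsulation implied by the
    // setOperationID()/getOperationID() calls in the hunks above. The real
    // change lives in systemtest's AbstractST.java, which this section of
    // the patch does not show.
    public abstract class AbstractStSketch {
        private String operationID;   // previously written to directly by subclasses

        protected String getOperationID() {
            return operationID;
        }

        protected void setOperationID(String operationID) {
            this.operationID = operationID;
        }
    }
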
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/RollingUpdateST.java b/systemtest/src/test/java/io/strimzi/systemtest/RollingUpdateST.java
index 44e2e0772b0..c540fde8bcf 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/RollingUpdateST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/RollingUpdateST.java
@@ -38,7 +38,7 @@ class RollingUpdateST extends AbstractST {
     @Test
     void testRecoveryDuringZookeeperRollingUpdate() {
         // @TODO add send-recv messages during this test
-        operationID = startTimeMeasuring(Operation.CLUSTER_RECOVERY);
+        setOperationID(startTimeMeasuring(Operation.CLUSTER_RECOVERY));
 
         String firstZkPodName = KafkaResources.zookeeperPodName(CLUSTER_NAME, 0);
         String logZkPattern = "'Exceeded timeout of .* while waiting for Pods resource " + firstZkPodName + "'";
@@ -60,7 +60,7 @@ void testRecoveryDuringZookeeperRollingUpdate() {
         StUtils.waitForPod(firstZkPodName);
         TestUtils.waitFor("Wait till rolling update timeout",
             Constants.CO_OPERATION_TIMEOUT_POLL, Constants.CO_OPERATION_TIMEOUT_WAIT,
-            () -> !cmdKubeClient().searchInLog("deploy", "strimzi-cluster-operator", TimeMeasuringSystem.getCurrentDuration(testClass, testName, operationID), logZkPattern).isEmpty());
+            () -> !cmdKubeClient().searchInLog("deploy", "strimzi-cluster-operator", TimeMeasuringSystem.getCurrentDuration(testClass, testName, getOperationID()), logZkPattern).isEmpty());
 
         assertThatRollingUpdatedFinished(KafkaResources.zookeeperStatefulSetName(CLUSTER_NAME), KafkaResources.kafkaStatefulSetName(CLUSTER_NAME));
@@ -83,13 +83,13 @@ void testRecoveryDuringZookeeperRollingUpdate() {
         assertThatRollingUpdatedFinished(KafkaResources.zookeeperStatefulSetName(CLUSTER_NAME), KafkaResources.kafkaStatefulSetName(CLUSTER_NAME));
 
         TimeMeasuringSystem.stopOperation(rollingUpdateOperation);
-        TimeMeasuringSystem.stopOperation(operationID);
+        TimeMeasuringSystem.stopOperation(getOperationID());
     }
 
     @Test
     void testRecoveryDuringKafkaRollingUpdate() {
         // @TODO add send-recv messages during this test
-        operationID = startTimeMeasuring(Operation.CLUSTER_RECOVERY);
+        setOperationID(startTimeMeasuring(Operation.CLUSTER_RECOVERY));
 
         String firstKafkaPodName = KafkaResources.kafkaPodName(CLUSTER_NAME, 0);
         String logKafkaPattern = "'Exceeded timeout of .* while waiting for Pods resource " + firstKafkaPodName + "'";
@@ -111,7 +111,7 @@ void testRecoveryDuringKafkaRollingUpdate() {
         StUtils.waitForPod(firstKafkaPodName);
         TestUtils.waitFor("Wait till rolling update timeouted",
             Constants.CO_OPERATION_TIMEOUT_POLL, Constants.CO_OPERATION_TIMEOUT_WAIT,
-            () -> !cmdKubeClient().searchInLog("deploy", "strimzi-cluster-operator", TimeMeasuringSystem.getCurrentDuration(testClass, testName, operationID), logKafkaPattern).isEmpty());
+            () -> !cmdKubeClient().searchInLog("deploy", "strimzi-cluster-operator", TimeMeasuringSystem.getCurrentDuration(testClass, testName, getOperationID()), logKafkaPattern).isEmpty());
 
         assertThatRollingUpdatedFinished(KafkaResources.kafkaStatefulSetName(CLUSTER_NAME), KafkaResources.zookeeperStatefulSetName(CLUSTER_NAME));
@@ -134,7 +134,7 @@ void testRecoveryDuringKafkaRollingUpdate() {
         assertThatRollingUpdatedFinished(KafkaResources.kafkaStatefulSetName(CLUSTER_NAME), KafkaResources.zookeeperStatefulSetName(CLUSTER_NAME));
 
         TimeMeasuringSystem.stopOperation(rollingUpdateOperation);
-        TimeMeasuringSystem.stopOperation(operationID);
+        TimeMeasuringSystem.stopOperation(getOperationID());
     }
 
     void assertThatRollingUpdatedFinished(String rolledComponent, String stableComponent) {
@@ -173,7 +173,7 @@ void setupEnvironment() {
         createTestClassResources();
         applyRoleBindings(NAMESPACE);
         // 050-Deployment
-        testClassResources.clusterOperator(NAMESPACE, Long.toString(Constants.CO_OPERATION_TIMEOUT)).done();
+        getTestClassResources().clusterOperator(NAMESPACE, Long.toString(Constants.CO_OPERATION_TIMEOUT)).done();
     }
 
     @Override
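Every recovery and rolling-update test above follows the same measurement pattern, which this refactor leaves intact apart from the accessor calls. Condensed into one place as a sketch (the test body is hypothetical; the calls are the ones visible in the hunks, including getDurationInSecconds, whose misspelled name is preserved because it is an existing identifier):

    // Condensed sketch of the per-test measurement lifecycle used above.
    void testSomeRecoveryScenario() {
        setOperationID(startTimeMeasuring(Operation.CLUSTER_RECOVERY));

        // ... delete a resource and wait for the Cluster Operator to recreate it ...

        TimeMeasuringSystem.stopOperation(getOperationID());
        // The measured duration bounds how far back the CO log is scanned for errors.
        assertNoCoErrorsLogged(TimeMeasuringSystem.getDurationInSecconds(testClass, testName, getOperationID()));
    }
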
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/SecurityST.java b/systemtest/src/test/java/io/strimzi/systemtest/SecurityST.java
index 4680f17358c..f378a1f79a1 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/SecurityST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/SecurityST.java
@@ -488,7 +488,7 @@ void setupEnvironment() {
         createTestClassResources();
         applyRoleBindings(NAMESPACE);
         // 050-Deployment
-        testClassResources.clusterOperator(NAMESPACE).done();
+        getTestClassResources().clusterOperator(NAMESPACE).done();
     }
 
     @Override
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/UserST.java b/systemtest/src/test/java/io/strimzi/systemtest/UserST.java
index 3dc8e22a189..ad79c31e985 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/UserST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/UserST.java
@@ -159,7 +159,7 @@ void setupEnvironment() {
         createTestClassResources();
         applyRoleBindings(NAMESPACE);
         // 050-Deployment
-        testClassResources.clusterOperator(NAMESPACE).done();
+        getTestClassResources().clusterOperator(NAMESPACE).done();
     }
 
     @Override
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java
index e8c450b54c8..197e0370610 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java
@@ -41,7 +41,7 @@ void testSendSimpleMessage() throws Exception {
         int messageCount = 50;
         String topicName = "topic-simple-send";
         // Create topic
-        testClassResources.topic(CLUSTER_NAME, topicName).done();
+        getTestClassResources().topic(CLUSTER_NAME, topicName).done();
 
         JsonObject records = generateHttpMessages(messageCount);
         JsonObject response = sendHttpRequests(records, bridgeHost, bridgePort, topicName);
@@ -54,7 +54,7 @@ void testReceiveSimpleMessage() throws Exception {
         int messageCount = 50;
         String topicName = "topic-simple-receive";
         // Create topic
-        testClassResources.topic(CLUSTER_NAME, topicName).done();
+        getTestClassResources().topic(CLUSTER_NAME, topicName).done();
 
         String name = "my-kafka-consumer";
         String groupId = "my-group-" + new Random().nextInt(Integer.MAX_VALUE);
@@ -90,7 +90,7 @@ void testReceiveSimpleMessage() throws Exception {
     void createClassResources() throws InterruptedException {
         LOGGER.info("Deploy Kafka and Kafka Bridge before tests");
         // Deploy kafka
-        testClassResources.kafkaEphemeral(CLUSTER_NAME, 1, 1)
+        getTestClassResources().kafkaEphemeral(CLUSTER_NAME, 1, 1)
             .editSpec()
                 .editKafka()
                     .editListeners()
@@ -102,7 +102,7 @@ void createClassResources() throws InterruptedException {
             .endSpec().done();
 
         // Deploy http bridge
-        testClassResources.kafkaBridge(CLUSTER_NAME, KafkaResources.plainBootstrapAddress(CLUSTER_NAME), 1, Constants.HTTP_BRIDGE_DEFAULT_PORT).done();
+        getTestClassResources().kafkaBridge(CLUSTER_NAME, KafkaResources.plainBootstrapAddress(CLUSTER_NAME), 1, Constants.HTTP_BRIDGE_DEFAULT_PORT).done();
 
         deployBridgeNodePortService();
         bridgePort = getBridgeNodePort();
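For orientation on the bridge tests above and below: generateHttpMessages(...) presumably builds the record batch that the Strimzi HTTP bridge accepts on its topic endpoint, i.e. a JSON object carrying a records array. A minimal self-contained sketch of that payload shape, using the same Vert.x JsonObject type the tests import (the class and method names here are illustrative, not from the patch):

    import io.vertx.core.json.JsonArray;
    import io.vertx.core.json.JsonObject;

    public class BridgePayloadSketch {
        // Builds {"records": [{"value": "message-0"}, ...]} - the batch shape the
        // bridge's send endpoint expects; keys and partitions are optional per record.
        public static JsonObject generate(int messageCount) {
            JsonArray records = new JsonArray();
            for (int i = 0; i < messageCount; i++) {
                records.add(new JsonObject().put("value", "message-" + i));
            }
            return new JsonObject().put("records", records);
        }
    }
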
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeScramShaST.java b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeScramShaST.java
index a44904b9f6b..69c6cf20933 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeScramShaST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeScramShaST.java
@@ -48,7 +48,7 @@ void testSendSimpleMessageTlsScramSha() throws Exception {
         int messageCount = 50;
         String topicName = "topic-simple-send-" + new Random().nextInt(Integer.MAX_VALUE);
         // Create topic
-        testClassResources.topic(CLUSTER_NAME, topicName).done();
+        getTestClassResources().topic(CLUSTER_NAME, topicName).done();
 
         JsonObject records = generateHttpMessages(messageCount);
         JsonObject response = sendHttpRequests(records, bridgeHost, bridgePort, topicName);
@@ -61,7 +61,7 @@ void testReceiveSimpleMessageTlsScramSha() throws Exception {
         int messageCount = 50;
         String topicName = "topic-simple-receive-" + new Random().nextInt(Integer.MAX_VALUE);
         // Create topic
-        testClassResources.topic(CLUSTER_NAME, topicName).done();
+        getTestClassResources().topic(CLUSTER_NAME, topicName).done();
 
         // Send messages to Kafka
         sendMessagesExternalScramSha(NAMESPACE, topicName, messageCount, userName);
@@ -103,7 +103,7 @@ void createClassResources() throws InterruptedException {
         listenerTls.setAuth(auth);
 
         // Deploy kafka
-        testClassResources.kafkaEphemeral(CLUSTER_NAME, 1, 1)
+        getTestClassResources().kafkaEphemeral(CLUSTER_NAME, 1, 1)
             .editSpec()
                 .editKafka()
                     .withNewListeners()
@@ -117,7 +117,7 @@ void createClassResources() throws InterruptedException {
             .endSpec().done();
 
         // Create Kafka user
-        KafkaUser userSource = testClassResources.scramShaUser(CLUSTER_NAME, userName).done();
+        KafkaUser userSource = getTestClassResources().scramShaUser(CLUSTER_NAME, userName).done();
         waitTillSecretExists(userName);
 
         // Initialize PasswordSecret to set this as PasswordSecret in Mirror Maker spec
@@ -131,7 +131,7 @@ void createClassResources() throws InterruptedException {
         certSecret.setSecretName(clusterCaCertSecretName(CLUSTER_NAME));
 
         // Deploy http bridge
-        testClassResources.kafkaBridge(CLUSTER_NAME, KafkaResources.tlsBootstrapAddress(CLUSTER_NAME), 1, Constants.HTTP_BRIDGE_DEFAULT_PORT)
+        getTestClassResources().kafkaBridge(CLUSTER_NAME, KafkaResources.tlsBootstrapAddress(CLUSTER_NAME), 1, Constants.HTTP_BRIDGE_DEFAULT_PORT)
         .editSpec()
             .withNewKafkaBridgeAuthenticationScramSha512()
                 .withNewUsername(userName)
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeTlsST.java b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeTlsST.java
index 2f579972472..c2e4cf1e194 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeTlsST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeTlsST.java
@@ -46,7 +46,7 @@ void testSendSimpleMessageTls() throws Exception {
         int messageCount = 50;
         String topicName = "topic-simple-send";
         // Create topic
-        testClassResources.topic(CLUSTER_NAME, topicName).done();
+        getTestClassResources().topic(CLUSTER_NAME, topicName).done();
 
         JsonObject records = generateHttpMessages(messageCount);
         JsonObject response = sendHttpRequests(records, bridgeHost, bridgePort, topicName);
@@ -59,7 +59,7 @@ void testReceiveSimpleMessageTls() throws Exception {
         int messageCount = 50;
         String topicName = "topic-simple-receive";
         // Create topic
-        testClassResources.topic(CLUSTER_NAME, topicName).done();
+        getTestClassResources().topic(CLUSTER_NAME, topicName).done();
 
         String name = "my-kafka-consumer";
         String groupId = "my-group-" + new Random().nextInt(Integer.MAX_VALUE);
@@ -100,7 +100,7 @@ void createClassResources() throws InterruptedException {
         listenerTls.setAuth(auth);
 
         // Deploy kafka
-        testClassResources.kafkaEphemeral(CLUSTER_NAME, 1, 1)
+        getTestClassResources().kafkaEphemeral(CLUSTER_NAME, 1, 1)
             .editSpec()
                 .editKafka()
                     .editListeners()
@@ -115,7 +115,7 @@ void createClassResources() throws InterruptedException {
             .endSpec().done();
 
         // Create Kafka user
-        KafkaUser userSource = testClassResources.tlsUser(CLUSTER_NAME, userName).done();
+        KafkaUser userSource = getTestClassResources().tlsUser(CLUSTER_NAME, userName).done();
         waitTillSecretExists(userName);
 
         // Initialize CertSecretSource with certificate and secret names for consumer
@@ -124,7 +124,7 @@ void createClassResources() throws InterruptedException {
         certSecret.setSecretName(clusterCaCertSecretName(CLUSTER_NAME));
 
         // Deploy http bridge
-        testClassResources.kafkaBridge(CLUSTER_NAME, KafkaResources.tlsBootstrapAddress(CLUSTER_NAME), 1, Constants.HTTP_BRIDGE_DEFAULT_PORT)
+        getTestClassResources().kafkaBridge(CLUSTER_NAME, KafkaResources.tlsBootstrapAddress(CLUSTER_NAME), 1, Constants.HTTP_BRIDGE_DEFAULT_PORT)
         .editSpec()
             .withNewTls()
                 .withTrustedCertificates(certSecret)
diff --git a/test-client/src/main/java/io/strimzi/test_client/HttpClientsListener.java b/test-client/src/main/java/io/strimzi/test_client/HttpClientsListener.java
index 16b45f465ce..1b0c3deecad 100644
--- a/test-client/src/main/java/io/strimzi/test_client/HttpClientsListener.java
+++ b/test-client/src/main/java/io/strimzi/test_client/HttpClientsListener.java
@@ -45,6 +45,9 @@ public void start() {
                 case DELETE:
                     deleteHandler(request);
                     break;
+                default:
+                    LOGGER.warn("Unexpected HTTP method ({}).", request.method());
+                    break;
             }
         });
         int port = 4242;
diff --git a/topic-operator/pom.xml b/topic-operator/pom.xml
index 778ba87db88..65bc0c257ba 100644
--- a/topic-operator/pom.xml
+++ b/topic-operator/pom.xml
@@ -115,6 +115,10 @@
         <dependency>
             <groupId>io.strimzi</groupId>
             <artifactId>operator-common</artifactId>
         </dependency>
+        <dependency>
+            <groupId>com.github.spotbugs</groupId>
+            <artifactId>spotbugs-annotations</artifactId>
+        </dependency>
     </dependencies>
 
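For context on the forEachLineStdout change in the next hunk: the per-line callback returns a nullable Boolean (true = finished, false = failed, null = keep scanning), and unboxing that result directly in a condition can throw a NullPointerException, which SpotBugs reports under NP_BOOLEAN_RETURN_NULL. A standalone sketch of the null-safe pattern the hunk adopts (all names here are illustrative, not from the patch):

    public class NullSafeBooleanSketch {
        // Stand-in for a per-line callback with three outcomes: TRUE, FALSE,
        // or null meaning "no decision on this line".
        static Boolean classifyLine(String line) {
            if (line.contains("failed")) {
                return Boolean.FALSE;
            } else if (line.contains("successfully")) {
                return Boolean.TRUE;
            } else {
                return null;
            }
        }

        public static void main(String[] args) {
            Boolean verdict = classifyLine("still in progress");
            // "if (!verdict)" would throw NullPointerException when verdict == null;
            // Boolean.TRUE.equals(verdict) treats both FALSE and null as not-finished.
            if (!Boolean.TRUE.equals(verdict)) {
                System.out.println("neither failed nor finished");
            }
        }
    }
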
diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/OperatorAssignedKafkaImpl.java b/topic-operator/src/main/java/io/strimzi/operator/topic/OperatorAssignedKafkaImpl.java
index 30c6e7fd71c..17dedaa37f6 100644
--- a/topic-operator/src/main/java/io/strimzi/operator/topic/OperatorAssignedKafkaImpl.java
+++ b/topic-operator/src/main/java/io/strimzi/operator/topic/OperatorAssignedKafkaImpl.java
@@ -7,6 +7,7 @@
 import com.fasterxml.jackson.core.JsonEncoding;
 import com.fasterxml.jackson.core.JsonFactory;
 import com.fasterxml.jackson.core.JsonGenerator;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import io.strimzi.operator.common.process.ProcessHelper;
 import io.vertx.core.AsyncResult;
 import io.vertx.core.CompositeFuture;
@@ -45,6 +46,7 @@
  * The operator is able to make rack-aware assignments (if so configured), but does not take into account
  * other aspects (e.g. disk utilisation, CPU load, network IO).
  */
+@SuppressFBWarnings({"REC_CATCH_EXCEPTION", "NP_BOOLEAN_RETURN_NULL"})
 public class OperatorAssignedKafkaImpl extends BaseKafkaImpl {
 
     private static final Pattern REASSIGN_FAILED = Pattern.compile("Reassignment of partition .* failed");
@@ -257,7 +259,7 @@ private void executeReassignment(File reassignmentJsonFile, String zookeeper, Lo
         executeArgs.add(reassignmentJsonFile.toString());
         executeArgs.add("--execute");
 
-        if (!forEachLineStdout(ProcessHelper.executeSubprocess(executeArgs), line -> {
+        if (!Boolean.TRUE.equals(forEachLineStdout(ProcessHelper.executeSubprocess(executeArgs), line -> {
             if (line.contains("Partitions reassignment failed due to") ||
                     line.contains("There is an existing assignment running") ||
                     line.contains("Failed to reassign partitions")) {
@@ -267,11 +269,13 @@ private void executeReassignment(File reassignmentJsonFile, String zookeeper, Lo
             } else {
                 return null;
             }
-        })) {
+        }))) {
             throw new TransientOperatorException("Reassignment execution neither failed nor finished");
         }
     }
 
+    // spotbugs bug
+    @SuppressFBWarnings("RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE")
     private String generateReassignment(Topic topic, String zookeeper) throws IOException,
             InterruptedException, ExecutionException {
         JsonFactory factory = new JsonFactory();
diff --git a/user-operator/pom.xml b/user-operator/pom.xml
index 56e2e0a3aa3..d463261716d 100644
--- a/user-operator/pom.xml
+++ b/user-operator/pom.xml
@@ -106,6 +106,10 @@
             <type>test-jar</type>
             <scope>test</scope>
         </dependency>
+        <dependency>
+            <groupId>com.github.spotbugs</groupId>
+            <artifactId>spotbugs-annotations</artifactId>
+        </dependency>
     </dependencies>
 
diff --git a/user-operator/src/main/java/io/strimzi/operator/user/Main.java b/user-operator/src/main/java/io/strimzi/operator/user/Main.java
index 6c3e0f53553..4f1deb450b0 100644
--- a/user-operator/src/main/java/io/strimzi/operator/user/Main.java
+++ b/user-operator/src/main/java/io/strimzi/operator/user/Main.java
@@ -4,6 +4,7 @@
  */
 package io.strimzi.operator.user;
 
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import io.fabric8.kubernetes.client.DefaultKubernetesClient;
 import io.fabric8.kubernetes.client.KubernetesClient;
 import io.strimzi.api.kafka.Crds;
@@ -26,6 +27,7 @@
 import java.util.HashMap;
 import java.util.Map;
 
+@SuppressFBWarnings("DM_EXIT")
 public class Main {
 
     private static final Logger log = LogManager.getLogger(Main.class.getName());
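One detail worth noting across the hunks above: every import stays edu.umd.cs.findbugs.annotations.SuppressFBWarnings even though the Maven dependency becomes com.github.spotbugs:spotbugs-annotations, because SpotBugs deliberately keeps the FindBugs package name for source compatibility. A minimal sketch of the resulting usage (the class is illustrative; DM_EXIT is the SpotBugs detector for direct System.exit() calls, which is deliberate in an operator entry point):

    // The package name is unchanged from FindBugs; only the Maven artifact moves
    // from com.google.code.findbugs:findbugs-annotations to
    // com.github.spotbugs:spotbugs-annotations.
    import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;

    @SuppressFBWarnings("DM_EXIT")
    public class ShutdownSketch {
        public static void main(String[] args) {
            System.err.println("unrecoverable condition, exiting");
            System.exit(1);   // intentional, hence the suppression
        }
    }
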