diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceImpl.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceImpl.java
index 3c5213c2c49cd..8b234a1aaa7b9 100644
--- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceImpl.java
+++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceImpl.java
@@ -29,9 +29,6 @@
 import org.elasticsearch.logging.Logger;
 
 import java.io.IOException;
-import java.security.AccessController;
-import java.security.PrivilegedActionException;
-import java.security.PrivilegedExceptionAction;
 import java.util.ServiceLoader;
 
 public class AzureComputeServiceImpl extends AbstractLifecycleComponent implements AzureComputeService {
@@ -94,11 +91,8 @@ private static String getRequiredSetting(Settings settings, Setting sett
     public HostedServiceGetDetailedResponse getServiceDetails() {
         SpecialPermission.check();
         try {
-            return AccessController.doPrivileged(
-                (PrivilegedExceptionAction<HostedServiceGetDetailedResponse>) () -> client.getHostedServicesOperations()
-                    .getDetailed(serviceName)
-            );
-        } catch (PrivilegedActionException e) {
+            return client.getHostedServicesOperations().getDetailed(serviceName);
+        } catch (Exception e) {
             throw new AzureServiceRemoteException("can not get list of azure nodes", e.getCause());
         }
     }
diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2SeedHostsProvider.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2SeedHostsProvider.java
index 930431bf4ea0c..de7ac23f4bc22 100644
--- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2SeedHostsProvider.java
+++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2SeedHostsProvider.java
@@ -107,7 +107,7 @@ protected List fetchDynamicNodes() {
             // NOTE: we don't filter by security group during the describe instances request for two reasons:
             // 1. differences in VPCs require different parameters during query (ID vs Name)
             // 2. We want to use two different strategies: (all security groups vs. any security groups)
-            descInstances = SocketAccess.doPrivileged(() -> clientReference.client().describeInstances(buildDescribeInstancesRequest()));
+            descInstances = clientReference.client().describeInstances(buildDescribeInstancesRequest());
         } catch (final Exception e) {
             logger.info("Exception while retrieving instance list from AWS API: {}", e.getMessage());
             logger.debug("Full exception:", e);
diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java
index 1c3d613f6ee59..9896ec1af10f0 100644
--- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java
+++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java
@@ -66,7 +66,7 @@ private Ec2Client buildClient(Ec2ClientSettings clientSettings) {
             final var endpoint = Endpoint.builder().url(URI.create(clientSettings.endpoint)).build();
             ec2ClientBuilder.endpointProvider(endpointParams -> CompletableFuture.completedFuture(endpoint));
         }
-        return SocketAccess.doPrivileged(ec2ClientBuilder::build);
+        return ec2ClientBuilder.build();
     }
 
     private static void applyProxyConfiguration(Ec2ClientSettings clientSettings, ApacheHttpClient.Builder httpClientBuilder) {
diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Utils.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Utils.java
index 450949514baa9..730310c8a4a62 100644
--- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Utils.java
+++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Utils.java
@@ -22,8 +22,8 @@ class AwsEc2Utils {
     static String getInstanceMetadata(String metadataPath) {
         final var httpClientBuilder = ApacheHttpClient.builder();
         httpClientBuilder.connectionTimeout(IMDS_CONNECTION_TIMEOUT);
-        try (var ec2Client = SocketAccess.doPrivileged(Ec2MetadataClient.builder().httpClient(httpClientBuilder)::build)) {
-            final var metadataValue = SocketAccess.doPrivileged(() -> ec2Client.get(metadataPath)).asString();
+        try (var ec2Client = Ec2MetadataClient.builder().httpClient(httpClientBuilder).build()) {
+            final var metadataValue = ec2Client.get(metadataPath).asString();
             if (Strings.hasText(metadataValue) == false) {
                 throw new IllegalStateException("no ec2 metadata returned from " + metadataPath);
             }
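The EC2 call sites above all drop the same wrapper, and the helper that provided it (SocketAccess, deleted just below) becomes dead code. A minimal sketch of the old pattern versus the new direct call, using a hypothetical Supplier-based stand-in for the real AWS SDK invocation (illustrative only, not part of the patch):

    import java.security.AccessController;
    import java.security.PrivilegedAction;
    import java.util.function.Supplier;

    final class PrivilegedCallSketch {

        // Old shape: run the operation with the plugin's own permissions on the stack,
        // so the SecurityManager would not reject the socket connection opened by the SDK.
        static <T> T doPrivileged(PrivilegedAction<T> operation) {
            // In Elasticsearch this was preceded by SpecialPermission.check().
            return AccessController.doPrivileged(operation);
        }

        static String describeOld(Supplier<String> sdkCall) {
            return doPrivileged(sdkCall::get);
        }

        // New shape: with the SecurityManager infrastructure gone, the call site is
        // simply the plain SDK invocation and exceptions propagate unchanged.
        static String describeNew(Supplier<String> sdkCall) {
            return sdkCall.get();
        }

        public static void main(String[] args) {
            Supplier<String> fakeSdkCall = () -> "reservation-0"; // hypothetical stand-in
            System.out.println(describeOld(fakeSdkCall));
            System.out.println(describeNew(fakeSdkCall));
        }
    }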
diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/SocketAccess.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/SocketAccess.java
deleted file mode 100644
index a9c42aed86d91..0000000000000
--- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/SocketAccess.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.discovery.ec2;
-
-import org.elasticsearch.SpecialPermission;
-
-import java.io.IOException;
-import java.net.SocketPermission;
-import java.security.AccessController;
-import java.security.PrivilegedAction;
-import java.security.PrivilegedActionException;
-import java.security.PrivilegedExceptionAction;
-
-/**
- * This plugin uses aws libraries to connect to aws services. For these remote calls the plugin needs
- * {@link SocketPermission} 'connect' to establish connections. This class wraps the operations requiring access in
- * {@link AccessController#doPrivileged(PrivilegedAction)} blocks.
- */
-final class SocketAccess {
-
-    private SocketAccess() {}
-
-    public static <T> T doPrivileged(PrivilegedAction<T> operation) {
-        SpecialPermission.check();
-        return AccessController.doPrivileged(operation);
-    }
-
-    public static <T> T doPrivilegedIOException(PrivilegedExceptionAction<T> operation) throws IOException {
-        SpecialPermission.check();
-        try {
-            return AccessController.doPrivileged(operation);
-        } catch (PrivilegedActionException e) {
-            throw (IOException) e.getCause();
-        }
-    }
-
-}
diff --git a/plugins/discovery-gce/src/internalClusterTest/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java b/plugins/discovery-gce/src/internalClusterTest/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java
index 9e3a708dc3ba3..540c04b8b61b9 100644
--- a/plugins/discovery-gce/src/internalClusterTest/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java
+++ b/plugins/discovery-gce/src/internalClusterTest/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java
@@ -14,7 +14,6 @@
 import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
 import org.elasticsearch.cloud.gce.GceInstancesService;
-import org.elasticsearch.cloud.gce.util.Access;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.TimeValue;
@@ -138,23 +137,21 @@ protected GceInstancesService createGceInstancesService() {
         return new GceInstancesService() {
             @Override
             public Collection<Instance> instances() {
-                return Access.doPrivileged(() -> {
-                    final List<Instance> instances = new ArrayList<>();
+                final List<Instance> instances = new ArrayList<>();
 
-                    for (DiscoveryNode discoveryNode : nodes.values()) {
-                        Instance instance = new Instance();
-                        instance.setName(discoveryNode.getName());
-                        instance.setStatus("STARTED");
+                for (DiscoveryNode discoveryNode : nodes.values()) {
+                    Instance instance = new Instance();
+                    instance.setName(discoveryNode.getName());
+                    instance.setStatus("STARTED");
 
-                        NetworkInterface networkInterface = new NetworkInterface();
-                        networkInterface.setNetworkIP(discoveryNode.getAddress().toString());
-                        instance.setNetworkInterfaces(singletonList(networkInterface));
+                    NetworkInterface networkInterface = new NetworkInterface();
+                    networkInterface.setNetworkIP(discoveryNode.getAddress().toString());
+                    instance.setNetworkInterfaces(singletonList(networkInterface));
 
-                        instances.add(instance);
-                    }
+                    instances.add(instance);
+                }
 
-                    return instances;
-                });
+                return instances;
             }
 
             @Override
diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java
index 7db88b680d0c0..c57de907611d0 100644
--- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java
+++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java
@@ -25,7 +25,6 @@
 import com.google.api.services.compute.model.Instance;
 import com.google.api.services.compute.model.InstanceList;
 
-import org.elasticsearch.cloud.gce.util.Access;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
@@ -68,19 +67,17 @@ public Collection instances() {
         try {
             // hack around code messiness in GCE code
             // TODO: get this fixed
-            return Access.doPrivilegedIOException(() -> {
-                String nextPageToken = null;
-                List<Instance> zoneInstances = new ArrayList<>();
-                do {
-                    Compute.Instances.List list = client().instances().list(project, zoneId).setPageToken(nextPageToken);
-                    InstanceList instanceList = list.execute();
-                    nextPageToken = instanceList.getNextPageToken();
-                    if (instanceList.isEmpty() == false && instanceList.getItems() != null) {
-                        zoneInstances.addAll(instanceList.getItems());
-                    }
-                } while (nextPageToken != null);
-                return zoneInstances;
-            });
+            String nextPageToken = null;
+            List<Instance> zoneInstances = new ArrayList<>();
+            do {
+                Compute.Instances.List list = client().instances().list(project, zoneId).setPageToken(nextPageToken);
+                InstanceList instanceList = list.execute();
+                nextPageToken = instanceList.getNextPageToken();
+                if (instanceList.isEmpty() == false && instanceList.getItems() != null) {
+                    zoneInstances.addAll(instanceList.getItems());
+                }
+            } while (nextPageToken != null);
+            return zoneInstances;
         } catch (IOException e) {
             logger.warn(() -> "Problem fetching instance list for zone " + zoneId, e);
             logger.debug("Full exception:", e);
@@ -152,7 +149,7 @@ private List resolveZones() {
 
     String getAppEngineValueFromMetadataServer(String serviceURL) throws GeneralSecurityException, IOException {
         String metadata = GceMetadataService.GCE_HOST.get(settings);
-        GenericUrl url = Access.doPrivileged(() -> new GenericUrl(metadata + serviceURL));
+        GenericUrl url = new GenericUrl(metadata + serviceURL);
 
         HttpTransport httpTransport = getGceHttpTransport();
         HttpRequestFactory requestFactory = httpTransport.createRequestFactory();
@@ -160,7 +157,7 @@ String getAppEngineValueFromMetadataServer(String serviceURL) throws GeneralSecu
             .setConnectTimeout(500)
             .setReadTimeout(500)
             .setHeaders(new HttpHeaders().set("Metadata-Flavor", "Google"));
-        HttpResponse response = Access.doPrivilegedIOException(() -> request.execute());
+        HttpResponse response = request.execute();
 
         return headerContainsMetadataFlavor(response) ? response.parseAsString() : null;
     }
@@ -211,7 +208,7 @@ public synchronized Compute client() {
 
                 // hack around code messiness in GCE code
                 // TODO: get this fixed
-                Access.doPrivilegedIOException(credential::refreshToken);
+                credential.refreshToken();
 
                 logger.debug("token [{}] will expire in [{}] s", credential.getAccessToken(), credential.getExpiresInSeconds());
                 if (credential.getExpiresInSeconds() != null) {
diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java
index 25ee9b23c3f51..526ef9f21fe82 100644
--- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java
+++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java
@@ -15,7 +15,6 @@
 import com.google.api.client.http.HttpResponse;
 import com.google.api.client.http.HttpTransport;
 
-import org.elasticsearch.cloud.gce.util.Access;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
@@ -66,14 +65,12 @@ public String metadata(String metadataPath) throws IOException, URISyntaxExcepti
         try {
             // hack around code messiness in GCE code
             // TODO: get this fixed
-            headers = Access.doPrivileged(HttpHeaders::new);
-            GenericUrl genericUrl = Access.doPrivileged(() -> new GenericUrl(urlMetadataNetwork));
+            headers = new HttpHeaders();
+            GenericUrl genericUrl = new GenericUrl(urlMetadataNetwork);
             // This is needed to query meta data: https://cloud.google.com/compute/docs/metadata
             headers.put("Metadata-Flavor", "Google");
-            HttpResponse response = Access.doPrivilegedIOException(
-                () -> getGceHttpTransport().createRequestFactory().buildGetRequest(genericUrl).setHeaders(headers).execute()
-            );
+            HttpResponse response = getGceHttpTransport().createRequestFactory().buildGetRequest(genericUrl).setHeaders(headers).execute();
             String metadata = response.parseAsString();
             logger.debug("metadata found [{}]", metadata);
             return metadata;
diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/network/GceNameResolver.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/network/GceNameResolver.java
index debf8ecb36b28..15b4f96ae765f 100644
--- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/network/GceNameResolver.java
+++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/network/GceNameResolver.java
@@ -10,12 +10,12 @@
 package org.elasticsearch.cloud.gce.network;
 
 import org.elasticsearch.cloud.gce.GceMetadataService;
-import org.elasticsearch.cloud.gce.util.Access;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.network.NetworkService.CustomNameResolver;
 
 import java.io.IOException;
 import java.net.InetAddress;
+import java.net.URISyntaxException;
 
 /**
  * Resolves certain GCE related 'meta' hostnames into an actual hostname
@@ -97,13 +97,13 @@ private InetAddress[] resolve(String value) throws IOException {
         }
 
         try {
-            String metadataResult = Access.doPrivilegedIOException(() -> gceMetadataService.metadata(gceMetadataPath));
+            String metadataResult = gceMetadataService.metadata(gceMetadataPath);
             if (metadataResult == null || metadataResult.length() == 0) {
                 throw new IOException("no gce metadata returned from [" + gceMetadataPath + "] for [" + value + "]");
             }
             // only one address: because we explicitly ask for only one via the GceHostnameType
             return new InetAddress[] { InetAddress.getByName(metadataResult) };
-        } catch (IOException e) {
+        } catch (URISyntaxException | IOException e) {
             throw new IOException("IOException caught when fetching InetAddress from [" + gceMetadataPath + "]", e);
         }
     }
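With the Access.doPrivilegedIOException wrapper gone, callers of GceMetadataService#metadata see both of its declared checked exceptions (IOException and URISyntaxException) directly, which is why the catch in GceNameResolver above is widened to a multi-catch. A small self-contained sketch of that caller-side pattern, using a hypothetical MetadataService interface as a stand-in for the real class (illustrative only, not part of the patch):

    import java.io.IOException;
    import java.net.URISyntaxException;

    final class MetadataCallSketch {

        // Stand-in for GceMetadataService#metadata, which declares both checked exceptions.
        interface MetadataService {
            String metadata(String path) throws IOException, URISyntaxException;
        }

        static String fetch(MetadataService service, String path) throws IOException {
            try {
                return service.metadata(path);
            } catch (URISyntaxException | IOException e) {
                // Re-wrap so callers only have to deal with IOException, as the resolver does above.
                throw new IOException("failed to fetch metadata from [" + path + "]", e);
            }
        }

        public static void main(String[] args) throws IOException {
            MetadataService fake = path -> "10.0.0.1"; // hypothetical in-memory service
            System.out.println(fetch(fake, "instance/hostname"));
        }
    }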
diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/util/Access.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/util/Access.java
deleted file mode 100644
index 8d2134b907e14..0000000000000
--- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/util/Access.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.cloud.gce.util;
-
-import org.elasticsearch.SpecialPermission;
-
-import java.io.IOException;
-import java.net.SocketPermission;
-import java.security.AccessController;
-import java.security.PrivilegedAction;
-import java.security.PrivilegedActionException;
-import java.security.PrivilegedExceptionAction;
-
-/**
- * GCE's HTTP client changes access levels. Specifically it needs {@link RuntimePermission} {@code
- * accessDeclaredMembers} and {@code setFactory}, and {@link java.lang.reflect.ReflectPermission}
- * {@code suppressAccessChecks}. For remote calls, the plugin needs {@link SocketPermission} for
- * {@code connect}. This class wraps the operations requiring access in
- * {@link AccessController#doPrivileged(PrivilegedAction)} blocks.
- */
-public final class Access {
-
-    private Access() {}
-
-    public static <T> T doPrivileged(final PrivilegedAction<T> operation) {
-        SpecialPermission.check();
-        return AccessController.doPrivileged(operation);
-    }
-
-    public static void doPrivilegedVoid(final Runnable action) {
-        SpecialPermission.check();
-        AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
-            action.run();
-            return null;
-        });
-    }
-
-    public static <T> T doPrivilegedIOException(final PrivilegedExceptionAction<T> operation) throws IOException {
-        SpecialPermission.check();
-        try {
-            return AccessController.doPrivileged(operation);
-        } catch (final PrivilegedActionException e) {
-            throw (IOException) e.getCause();
-        }
-    }
-
-}
diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/RetryHttpInitializerWrapper.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/RetryHttpInitializerWrapper.java
index a41eec89fe7f8..427e3220ce9f2 100644
--- a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/RetryHttpInitializerWrapper.java
+++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/RetryHttpInitializerWrapper.java
@@ -20,7 +20,6 @@
 import com.google.api.client.util.ExponentialBackOff;
 import com.google.api.client.util.Sleeper;
 
-import org.elasticsearch.cloud.gce.util.Access;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.logging.LogManager;
 import org.elasticsearch.logging.Logger;
@@ -60,7 +59,7 @@ public RetryHttpInitializerWrapper(Credential wrappedCredential, TimeValue maxWa
     // Use only for testing
     static MockGoogleCredential.Builder newMockCredentialBuilder() {
         // TODO: figure out why GCE is so bad like this
-        return Access.doPrivileged(MockGoogleCredential.Builder::new);
+        return new MockGoogleCredential.Builder();
     }
 
     @Override
diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java
index 735d74b767bb9..aa2ca3914ea1d 100644
--- a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java
+++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java
@@ -9,15 +9,11 @@
 
 package org.elasticsearch.plugin.discovery.gce;
 
-import com.google.api.client.http.HttpHeaders;
-import com.google.api.client.util.ClassInfo;
-
 import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.cloud.gce.GceInstancesService;
 import org.elasticsearch.cloud.gce.GceInstancesServiceImpl;
 import org.elasticsearch.cloud.gce.GceMetadataService;
 import org.elasticsearch.cloud.gce.network.GceNameResolver;
-import org.elasticsearch.cloud.gce.util.Access;
 import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
@@ -53,18 +49,6 @@ public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin, Close
     // stashed when created in order to properly close
     private final SetOnce<GceInstancesServiceImpl> gceInstancesService = new SetOnce<>();
 
-    static {
-        /*
-         * GCE's http client changes access levels because its silly and we
-         * can't allow that on any old stack so we pull it here, up front,
-         * so we can cleanly check the permissions for it. Without this changing
-         * the permission can fail if any part of core is on the stack because
-         * our plugin permissions don't allow core to "reach through" plugins to
-         * change the permission. Because that'd be silly.
-         */
-        Access.doPrivilegedVoid(() -> ClassInfo.of(HttpHeaders.class, true));
-    }
-
     public GceDiscoveryPlugin(Settings settings) {
         this.settings = settings;
         logger.trace("starting gce discovery plugin...");
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java
index 54a8559647583..b1342a63c71a4 100644
--- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java
@@ -32,7 +32,6 @@
 import org.elasticsearch.repositories.hdfs.HdfsBlobStore.Operation;
 
 import java.io.FileNotFoundException;
-import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -113,9 +112,7 @@ public InputStream readBlob(OperationPurpose purpose, String blobName) throws IO
         // HDFSPrivilegedInputSteam which will ensure that underlying methods will
         // be called with the proper privileges.
         try {
-            return store.execute(
-                fileContext -> new HDFSPrivilegedInputSteam(fileContext.open(new Path(path, blobName), bufferSize), securityContext)
-            );
+            return store.execute(fileContext -> fileContext.open(new Path(path, blobName), bufferSize));
         } catch (FileNotFoundException fnfe) {
             throw new NoSuchFileException("[" + blobName + "] blob not found");
         }
@@ -134,7 +131,7 @@ public InputStream readBlob(OperationPurpose purpose, String blobName, long posi
                 // should direct the datanode to start on the appropriate block, at the
                 // appropriate target position.
                 fsInput.seek(position);
-                return Streams.limitStream(new HDFSPrivilegedInputSteam(fsInput, securityContext), length);
+                return Streams.limitStream(fsInput, length);
             });
         } catch (FileNotFoundException fnfe) {
             throw new NoSuchFileException("[" + blobName + "] blob not found");
@@ -326,48 +323,6 @@ public Map children(OperationPurpose purpose) throws IOEx
         return Collections.unmodifiableMap(map);
     }
 
-    /**
-     * Exists to wrap underlying InputStream methods that might make socket connections in
-     * doPrivileged blocks. This is due to the way that hdfs client libraries might open
-     * socket connections when you are reading from an InputStream.
-     */
-    private static class HDFSPrivilegedInputSteam extends FilterInputStream {
-
-        private final HdfsSecurityContext securityContext;
-
-        HDFSPrivilegedInputSteam(InputStream in, HdfsSecurityContext hdfsSecurityContext) {
-            super(in);
-            this.securityContext = hdfsSecurityContext;
-        }
-
-        public int read() throws IOException {
-            return securityContext.doPrivilegedOrThrow(in::read);
-        }
-
-        public int read(byte b[]) throws IOException {
-            return securityContext.doPrivilegedOrThrow(() -> in.read(b));
-        }
-
-        public int read(byte b[], int off, int len) throws IOException {
-            return securityContext.doPrivilegedOrThrow(() -> in.read(b, off, len));
-        }
-
-        public long skip(long n) throws IOException {
-            return securityContext.doPrivilegedOrThrow(() -> in.skip(n));
-        }
-
-        public int available() throws IOException {
-            return securityContext.doPrivilegedOrThrow(() -> in.available());
-        }
-
-        public synchronized void reset() throws IOException {
-            securityContext.doPrivilegedOrThrow(() -> {
-                in.reset();
-                return null;
-            });
-        }
-    }
-
     @Override
     public void compareAndExchangeRegister(
         OperationPurpose purpose,
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java
index e817384d95c04..af87496302e8e 100644
--- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java
@@ -36,9 +36,7 @@ final class HdfsBlobStore implements BlobStore {
     HdfsBlobStore(FileContext fileContext, String path, int bufferSize, boolean readOnly, boolean haEnabled, Short replicationFactor)
         throws IOException {
         this.fileContext = fileContext;
-        // Only restrict permissions if not running with HA
-        boolean restrictPermissions = (haEnabled == false);
-        this.securityContext = new HdfsSecurityContext(fileContext.getUgi(), restrictPermissions);
+        this.securityContext = new HdfsSecurityContext(fileContext.getUgi());
         this.bufferSize = bufferSize;
         this.replicationFactor = replicationFactor;
         this.root = execute(fileContext1 -> fileContext1.makeQualified(new Path(path)));
@@ -103,10 +101,8 @@ V execute(Operation operation) throws IOException {
         if (closed) {
             throw new AlreadyClosedException("HdfsBlobStore is closed: " + this);
         }
-        return securityContext.doPrivilegedOrThrow(() -> {
-            securityContext.ensureLogin();
-            return operation.run(fileContext);
-        });
+        securityContext.ensureLogin();
+        return operation.run(fileContext);
     }
 
     @Override
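After this change the HDFS blob store's execute() only re-checks the Hadoop login and then runs the operation directly, instead of routing it through doPrivilegedOrThrow with a restricted permission set. A small self-contained sketch of the resulting control flow; FileContext, Operation and the security context here are simplified stand-ins, not the real Hadoop or Elasticsearch classes (illustrative only, not part of the patch):

    import java.io.IOException;

    final class HdfsExecuteSketch {

        // Stand-in for org.apache.hadoop.fs.FileContext.
        record FileContext(String root) {}

        @FunctionalInterface
        interface Operation<V> {
            V run(FileContext fileContext) throws IOException;
        }

        interface SecurityContext {
            void ensureLogin(); // e.g. refresh credentials; real implementation not shown in this hunk
        }

        private final FileContext fileContext = new FileContext("/repo");
        private final SecurityContext securityContext = () -> { /* no-op for the sketch */ };

        // Check the login, then run the operation directly - no privileged wrapper.
        <V> V execute(Operation<V> operation) throws IOException {
            securityContext.ensureLogin();
            return operation.run(fileContext);
        }

        public static void main(String[] args) throws IOException {
            HdfsExecuteSketch store = new HdfsExecuteSketch();
            System.out.println(store.execute(fc -> fc.root() + "/indices"));
        }
    }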
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsPlugin.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsPlugin.java
index ff63d3a1799e5..939317c432d2f 100644
--- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsPlugin.java
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsPlugin.java
@@ -11,10 +11,8 @@
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.SecurityUtil;
-import org.elasticsearch.SpecialPermission;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.util.BigArrays;
-import org.elasticsearch.core.SuppressForbidden;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.indices.recovery.RecoverySettings;
 import org.elasticsearch.plugins.Plugin;
@@ -23,59 +21,17 @@
 import org.elasticsearch.repositories.Repository;
 import org.elasticsearch.xcontent.NamedXContentRegistry;
 
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.security.AccessController;
-import java.security.PrivilegedAction;
 import java.util.Collections;
 import java.util.Map;
 
 public final class HdfsPlugin extends Plugin implements RepositoryPlugin {
 
-    // initialize some problematic classes with elevated privileges
+    // initialize some problematic classes
     static {
-        SpecialPermission.check();
-        AccessController.doPrivileged((PrivilegedAction<Void>) HdfsPlugin::evilHadoopInit);
-        AccessController.doPrivileged((PrivilegedAction<Void>) HdfsPlugin::eagerInit);
+        eagerInitSecurityUtil();
     }
 
-    @SuppressForbidden(reason = "Needs a security hack for hadoop on windows, until HADOOP-XXXX is fixed")
-    private static Void evilHadoopInit() {
-        // hack: on Windows, Shell's clinit has a similar problem that on unix,
-        // but here we can workaround it for now by setting hadoop home
-        // on unix: we still want to set this to something we control, because
-        // if the user happens to have HADOOP_HOME in their environment -> checkHadoopHome goes boom
-        // TODO: remove THIS when hadoop is fixed
-        Path hadoopHome = null;
-        String oldValue = null;
-        try {
-            hadoopHome = Files.createTempDirectory("hadoop").toAbsolutePath();
-            oldValue = System.setProperty("hadoop.home.dir", hadoopHome.toString());
-            Class.forName("org.apache.hadoop.security.UserGroupInformation");
-            Class.forName("org.apache.hadoop.util.StringUtils");
-            Class.forName("org.apache.hadoop.util.ShutdownHookManager");
-            Class.forName("org.apache.hadoop.conf.Configuration");
-        } catch (ClassNotFoundException | IOException e) {
-            throw new RuntimeException(e);
-        } finally {
-            // try to clean up the hack
-            if (oldValue == null) {
-                System.clearProperty("hadoop.home.dir");
-            } else {
-                System.setProperty("hadoop.home.dir", oldValue);
-            }
-            try {
-                // try to clean up our temp dir too if we can
-                if (hadoopHome != null) {
-                    Files.delete(hadoopHome);
-                }
-            } catch (IOException thisIsBestEffort) {}
-        }
-        return null;
-    }
-
-    private static Void eagerInit() {
+    private static Void eagerInitSecurityUtil() {
         /*
         * Hadoop RPC wire serialization uses ProtocolBuffers. All proto classes for Hadoop
         * come annotated with configurations that denote information about if they support
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java
index 2a1847d65b70b..8f250d00f8d9a 100644
--- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java
@@ -17,7 +17,6 @@
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-import org.elasticsearch.SpecialPermission;
 import org.elasticsearch.cluster.metadata.RepositoryMetadata;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Strings;
@@ -39,7 +38,6 @@
 import java.net.InetAddress;
 import java.net.URI;
 import java.net.UnknownHostException;
-import java.security.AccessController;
 import java.security.PrivilegedAction;
 import java.util.Locale;
 
@@ -281,12 +279,7 @@ private static String getHostName() {
 
     @Override
     protected HdfsBlobStore createBlobStore() {
-        // initialize our blobstore using elevated privileges.
-        SpecialPermission.check();
-        final HdfsBlobStore blobStore = AccessController.doPrivileged(
-            (PrivilegedAction<HdfsBlobStore>) () -> createBlobstore(uri, pathSetting, getMetadata().settings())
-        );
-        return blobStore;
+        return createBlobstore(uri, pathSetting, getMetadata().settings());
     }
 
     @Override
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java
index e74d1a87959f2..26930b436524e 100644
--- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java
@@ -9,73 +9,20 @@
 package org.elasticsearch.repositories.hdfs;
 
 import org.apache.hadoop.security.UserGroupInformation;
-import org.elasticsearch.SpecialPermission;
 import org.elasticsearch.env.Environment;
 
 import java.io.IOException;
 import java.io.UncheckedIOException;
-import java.lang.reflect.ReflectPermission;
-import java.net.SocketPermission;
 import java.nio.file.Files;
 import java.nio.file.Path;
-import java.security.AccessController;
-import java.security.Permission;
-import java.security.PrivilegedActionException;
-import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
-
-import javax.security.auth.AuthPermission;
-import javax.security.auth.PrivateCredentialPermission;
-import javax.security.auth.kerberos.ServicePermission;
 
 /**
  * Oversees all the security specific logic for the HDFS Repository plugin.
  *
- * Keeps track of the current user for a given repository, as well as which
- * permissions to grant the blob store restricted execution methods.
+ * Keeps track of the current user for a given repository.
 */
 class HdfsSecurityContext {
 
-    private static final Permission[] SIMPLE_AUTH_PERMISSIONS;
-    private static final Permission[] KERBEROS_AUTH_PERMISSIONS;
-    static {
-        // We can do FS ops with only a few elevated permissions:
-        SIMPLE_AUTH_PERMISSIONS = new Permission[] {
-            new SocketPermission("*", "connect"),
-            // 1) hadoop dynamic proxy is messy with access rules
-            new ReflectPermission("suppressAccessChecks"),
-            // 2) allow hadoop to add credentials to our Subject
-            new AuthPermission("modifyPrivateCredentials"),
-            // 3) RPC Engine requires this for re-establishing pooled connections over the lifetime of the client
-            new PrivateCredentialPermission("org.apache.hadoop.security.Credentials * \"*\"", "read"),
-            new RuntimePermission("getClassLoader") };
-
-        // If Security is enabled, we need all the following elevated permissions:
-        KERBEROS_AUTH_PERMISSIONS = new Permission[] {
-            new SocketPermission("*", "connect"),
-            // 1) hadoop dynamic proxy is messy with access rules
-            new ReflectPermission("suppressAccessChecks"),
-            // 2) allow hadoop to add credentials to our Subject
-            new AuthPermission("modifyPrivateCredentials"),
-            // 3) allow hadoop to act as the logged in Subject
-            new AuthPermission("doAs"),
-            // 4) Listen and resolve permissions for kerberos server principals
-            new SocketPermission("localhost:0", "listen,resolve"),
-            // We add the following since hadoop requires the client to re-login when the kerberos ticket expires:
-            // 5) All the permissions needed for UGI to do its weird JAAS hack
-            new RuntimePermission("getClassLoader"),
-            new RuntimePermission("setContextClassLoader"),
-            // 6) Additional permissions for the login modules
-            new AuthPermission("modifyPrincipals"),
-            new PrivateCredentialPermission("org.apache.hadoop.security.Credentials * \"*\"", "read"),
-            new PrivateCredentialPermission("javax.security.auth.kerberos.KerberosTicket * \"*\"", "read"),
-            new PrivateCredentialPermission("javax.security.auth.kerberos.KeyTab * \"*\"", "read")
-            // Included later:
-            // 7) allow code to initiate kerberos connections as the logged in user
-            // Still far and away fewer permissions than the original full plugin policy
-        };
-    }
-
     /**
      * Locates the keytab file in the environment and verifies that it exists.
     * Expects keytab file to exist at {@code $CONFIG_DIR$/repository-hdfs/krb5.keytab}
@@ -93,50 +40,9 @@ static Path locateKeytabFile(Environment environment) {
     }
 
     private final UserGroupInformation ugi;
-    private final boolean restrictPermissions;
-    private final Permission[] restrictedExecutionPermissions;
 
-    HdfsSecurityContext(UserGroupInformation ugi, boolean restrictPermissions) {
+    HdfsSecurityContext(UserGroupInformation ugi) {
         this.ugi = ugi;
-        this.restrictPermissions = restrictPermissions;
-        this.restrictedExecutionPermissions = renderPermissions(ugi);
-    }
-
-    private Permission[] renderPermissions(UserGroupInformation userGroupInformation) {
-        Permission[] permissions;
-        if (userGroupInformation.isFromKeytab()) {
-            // KERBEROS
-            // Leave room to append one extra permission based on the logged in user's info.
-            int permlen = KERBEROS_AUTH_PERMISSIONS.length + 1;
-            permissions = new Permission[permlen];
-
-            System.arraycopy(KERBEROS_AUTH_PERMISSIONS, 0, permissions, 0, KERBEROS_AUTH_PERMISSIONS.length);
-
-            // Append a kerberos.ServicePermission to only allow initiating kerberos connections
-            // as the logged in user.
-            permissions[permissions.length - 1] = new ServicePermission(userGroupInformation.getUserName(), "initiate");
-        } else {
-            // SIMPLE
-            permissions = Arrays.copyOf(SIMPLE_AUTH_PERMISSIONS, SIMPLE_AUTH_PERMISSIONS.length);
-        }
-        return permissions;
-    }
-
-    private Permission[] getRestrictedExecutionPermissions() {
-        return restrictedExecutionPermissions;
-    }
-
-    <T> T doPrivilegedOrThrow(PrivilegedExceptionAction<T> action) throws IOException {
-        SpecialPermission.check();
-        try {
-            if (restrictPermissions) {
-                return AccessController.doPrivileged(action, null, this.getRestrictedExecutionPermissions());
-            } else {
-                return AccessController.doPrivileged(action);
-            }
-        } catch (PrivilegedActionException e) {
-            throw (IOException) e.getCause();
-        }
     }
 
     void ensureLogin() {